Diffstat (limited to 'drivers/target')
93 files changed, 64542 insertions, 0 deletions
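For orientation before the per-file diffs: the Kconfig entries added below introduce the symbols used throughout this series. A hypothetical .config fragment building everything added here as modules might look like the following sketch (illustrative only; it assumes the prerequisites named in the "depends on" clauses, such as BLOCK, SCSI, INET, UIO, NET and CHELSIO_T4, are satisfied elsewhere in the configuration):

    CONFIG_TARGET_CORE=m
    CONFIG_TCM_IBLOCK=m
    CONFIG_TCM_FILEIO=m
    CONFIG_TCM_PSCSI=m
    CONFIG_TCM_USER2=m
    CONFIG_ISCSI_TARGET=m
    CONFIG_ISCSI_TARGET_CXGB4=m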
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 0000000000..92641d3912
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig TARGET_CORE
+	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+	depends on BLOCK
+	select CONFIGFS_FS
+	select CRC_T10DIF
+	select SCSI_COMMON
+	select SGL_ALLOC
+	default n
+	help
+	  Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+	  control path for target_core_mod. This includes built-in TCM RAMDISK
+	  subsystem logic for virtual LUN 0 access
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	select BLK_DEV_INTEGRITY
+	help
+	  Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+	  access to Linux/Block devices using BIO
+
+config TCM_FILEIO
+	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+	help
+	  Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+	  access to Linux/VFS struct file or struct block_device
+
+config TCM_PSCSI
+	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+	depends on SCSI
+	help
+	  Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+	  passthrough access to Linux/SCSI device
+
+config TCM_USER2
+	tristate "TCM/USER Subsystem Plugin for Linux"
+	depends on UIO && NET
+	help
+	  Say Y here to enable the TCM/USER subsystem plugin for a userspace
+	  process to handle requests. This is version 2 of the ABI; version 1
+	  is obsolete.
+
+source "drivers/target/loopback/Kconfig"
+source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
+source "drivers/target/sbp/Kconfig"
+source "drivers/target/tcm_remote/Kconfig"
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 0000000000..431b84abfb
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+target_core_mod-y		:= target_core_configfs.o \
+				   target_core_device.o \
+				   target_core_fabric_configfs.o \
+				   target_core_fabric_lib.o \
+				   target_core_hba.o \
+				   target_core_pr.o \
+				   target_core_alua.o \
+				   target_core_tmr.o \
+				   target_core_tpg.o \
+				   target_core_transport.o \
+				   target_core_sbc.o \
+				   target_core_spc.o \
+				   target_core_ua.o \
+				   target_core_rd.o \
+				   target_core_stat.o \
+				   target_core_xcopy.o
+
+obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
+obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
+obj-$(CONFIG_TCM_USER2)		+= target_core_user.o
+
+# Fabric modules
+obj-$(CONFIG_LOOPBACK_TARGET)	+= loopback/
+obj-$(CONFIG_TCM_FC)		+= tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET)	+= iscsi/
+obj-$(CONFIG_SBP_TARGET)	+= sbp/
+obj-$(CONFIG_REMOTE_TARGET)	+= tcm_remote/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 0000000000..922484ea4e
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ISCSI_TARGET
+	tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+	depends on INET
+	select CRYPTO
+	select CRYPTO_CRC32C
+	select CRYPTO_CRC32C_INTEL if X86
+	help
+	  Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
+	  Target Mode Stack.
+
+source	"drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 0000000000..8c9ae96b76
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+iscsi_target_mod-y +=		iscsi_target_parameters.o \
+				iscsi_target_seq_pdu_list.o \
+				iscsi_target_auth.o \
+				iscsi_target_datain_values.o \
+				iscsi_target_device.o \
+				iscsi_target_erl0.o \
+				iscsi_target_erl1.o \
+				iscsi_target_erl2.o \
+				iscsi_target_login.o \
+				iscsi_target_nego.o \
+				iscsi_target_nodeattrib.o \
+				iscsi_target_tmr.o \
+				iscsi_target_tpg.o \
+				iscsi_target_util.o \
+				iscsi_target.o \
+				iscsi_target_configfs.o \
+				iscsi_target_stat.o \
+				iscsi_target_transport.o
+
+obj-$(CONFIG_ISCSI_TARGET)	+= iscsi_target_mod.o
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
new file mode 100644
index 0000000000..bdeefa75f2
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ISCSI_TARGET_CXGB4
+	tristate "Chelsio iSCSI target offload driver"
+	depends on ISCSI_TARGET && CHELSIO_T4 && INET
+	select CHELSIO_LIB
+	help
+	  To compile this driver as module, choose M here: the module
+	  will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
new file mode 100644
index 0000000000..0dcaf2006f
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
+ccflags-y += -I $(srctree)/drivers/target/iscsi
+
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
+
+cxgbit-y  := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
new file mode 100644
index 0000000000..aff7276296
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ */ + +#ifndef __CXGBIT_H__ +#define __CXGBIT_H__ + +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/idr.h> +#include <linux/completion.h> +#include <linux/netdevice.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/inet.h> +#include <linux/wait.h> +#include <linux/kref.h> +#include <linux/timer.h> +#include <linux/io.h> + +#include <asm/byteorder.h> + +#include <net/net_namespace.h> + +#include <target/iscsi/iscsi_transport.h> +#include <iscsi_target_parameters.h> +#include <iscsi_target_login.h> + +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "l2t.h" +#include "libcxgb_ppm.h" +#include "cxgbit_lro.h" + +extern struct mutex cdev_list_lock; +extern struct list_head cdev_list_head; +struct cxgbit_np; + +struct cxgbit_sock; + +struct cxgbit_cmd { + struct scatterlist sg; + struct cxgbi_task_tag_info ttinfo; + bool setup_ddp; + bool release; +}; + +#define CXGBIT_MAX_ISO_PAYLOAD \ + min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535) + +struct cxgbit_iso_info { + u8 flags; + u32 mpdu; + u32 len; + u32 burst_len; +}; + +enum cxgbit_skcb_flags { + SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */ + SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */ + SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */ + SKCBF_RX_LRO = (1 << 3), /* lro skb */ +}; + +struct cxgbit_skb_rx_cb { + u8 opcode; + void *pdu_cb; + void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *); +}; + +struct cxgbit_skb_tx_cb { + u8 submode; + u32 extra_len; +}; + +union cxgbit_skb_cb { + struct { + u8 flags; + union { + struct cxgbit_skb_tx_cb tx; + struct cxgbit_skb_rx_cb rx; + }; + }; + + struct { + /* This member must be first. */ + struct l2t_skb_cb l2t; + struct sk_buff *wr_next; + }; +}; + +#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0])) +#define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags) +#define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode) +#define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next) +#define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len) +#define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode) +#define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn) +#define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb) + +static inline void *cplhdr(struct sk_buff *skb) +{ + return skb->data; +} + +enum cxgbit_cdev_flags { + CDEV_STATE_UP = 0, + CDEV_ISO_ENABLE, + CDEV_DDP_ENABLE, +}; + +#define NP_INFO_HASH_SIZE 32 + +struct np_info { + struct np_info *next; + struct cxgbit_np *cnp; + unsigned int stid; +}; + +struct cxgbit_list_head { + struct list_head list; + /* device lock */ + spinlock_t lock; +}; + +struct cxgbit_device { + struct list_head list; + struct cxgb4_lld_info lldi; + struct np_info *np_hash_tab[NP_INFO_HASH_SIZE]; + /* np lock */ + spinlock_t np_lock; + u8 selectq[MAX_NPORTS][2]; + struct cxgbit_list_head cskq; + u32 mdsl; + struct kref kref; + unsigned long flags; +}; + +struct cxgbit_wr_wait { + struct completion completion; + int ret; +}; + +enum cxgbit_csk_state { + CSK_STATE_IDLE = 0, + CSK_STATE_LISTEN, + CSK_STATE_CONNECTING, + CSK_STATE_ESTABLISHED, + CSK_STATE_ABORTING, + CSK_STATE_CLOSING, + CSK_STATE_MORIBUND, + CSK_STATE_DEAD, +}; + +enum cxgbit_csk_flags { + CSK_TX_DATA_SENT = 0, + CSK_LOGIN_PDU_DONE, + CSK_LOGIN_DONE, + CSK_DDP_ENABLE, + CSK_ABORT_RPL_WAIT, +}; + +struct cxgbit_sock_common { + struct cxgbit_device *cdev; + struct 
sockaddr_storage local_addr; + struct sockaddr_storage remote_addr; + struct cxgbit_wr_wait wr_wait; + enum cxgbit_csk_state state; + unsigned long flags; +}; + +struct cxgbit_np { + struct cxgbit_sock_common com; + wait_queue_head_t accept_wait; + struct iscsi_np *np; + struct completion accept_comp; + struct list_head np_accept_list; + /* np accept lock */ + spinlock_t np_accept_lock; + struct kref kref; + unsigned int stid; +}; + +struct cxgbit_sock { + struct cxgbit_sock_common com; + struct cxgbit_np *cnp; + struct iscsit_conn *conn; + struct l2t_entry *l2t; + struct dst_entry *dst; + struct list_head list; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct sk_buff_head ppodq; + struct sk_buff_head backlogq; + struct sk_buff_head skbq; + struct sk_buff *wr_pending_head; + struct sk_buff *wr_pending_tail; + struct sk_buff *skb; + struct sk_buff *lro_skb; + struct sk_buff *lro_hskb; + struct list_head accept_node; + /* socket lock */ + spinlock_t lock; + wait_queue_head_t waitq; + bool lock_owner; + struct kref kref; + u32 max_iso_npdu; + u32 wr_cred; + u32 wr_una_cred; + u32 wr_max_cred; + u32 snd_una; + u32 tid; + u32 snd_nxt; + u32 rcv_nxt; + u32 smac_idx; + u32 tx_chan; + u32 mtu; + u32 write_seq; + u32 rx_credits; + u32 snd_win; + u32 rcv_win; + u16 mss; + u16 emss; + u16 plen; + u16 rss_qid; + u16 txq_idx; + u16 ctrlq_idx; + u8 tos; + u8 port_id; +#define CXGBIT_SUBMODE_HCRC 0x1 +#define CXGBIT_SUBMODE_DCRC 0x2 + u8 submode; +#ifdef CONFIG_CHELSIO_T4_DCB + u8 dcb_priority; +#endif + u8 snd_wscale; +}; + +void _cxgbit_free_cdev(struct kref *kref); +void _cxgbit_free_csk(struct kref *kref); +void _cxgbit_free_cnp(struct kref *kref); + +static inline void cxgbit_get_cdev(struct cxgbit_device *cdev) +{ + kref_get(&cdev->kref); +} + +static inline void cxgbit_put_cdev(struct cxgbit_device *cdev) +{ + kref_put(&cdev->kref, _cxgbit_free_cdev); +} + +static inline void cxgbit_get_csk(struct cxgbit_sock *csk) +{ + kref_get(&csk->kref); +} + +static inline void cxgbit_put_csk(struct cxgbit_sock *csk) +{ + kref_put(&csk->kref, _cxgbit_free_csk); +} + +static inline void cxgbit_get_cnp(struct cxgbit_np *cnp) +{ + kref_get(&cnp->kref); +} + +static inline void cxgbit_put_cnp(struct cxgbit_np *cnp) +{ + kref_put(&cnp->kref, _cxgbit_free_cnp); +} + +static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk) +{ + csk->wr_pending_tail = NULL; + csk->wr_pending_head = NULL; +} + +static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk) +{ + return csk->wr_pending_head; +} + +static inline void +cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + cxgbit_skcb_tx_wr_next(skb) = NULL; + + skb_get(skb); + + if (!csk->wr_pending_head) + csk->wr_pending_head = skb; + else + cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb; + csk->wr_pending_tail = skb; +} + +static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk) +{ + struct sk_buff *skb = csk->wr_pending_head; + + if (likely(skb)) { + csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb); + cxgbit_skcb_tx_wr_next(skb) = NULL; + } + return skb; +} + +typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *, + struct sk_buff *); + +int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *); +int cxgbit_setup_conn_digest(struct cxgbit_sock *); +int cxgbit_accept_np(struct iscsi_np *, struct iscsit_conn *); +void cxgbit_free_np(struct iscsi_np *); +void cxgbit_abort_conn(struct cxgbit_sock *csk); +void cxgbit_free_conn(struct iscsit_conn *); +extern 
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS]; +int cxgbit_get_login_rx(struct iscsit_conn *, struct iscsi_login *); +int cxgbit_rx_data_ack(struct cxgbit_sock *); +int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *, + struct l2t_entry *); +void cxgbit_push_tx_frames(struct cxgbit_sock *); +int cxgbit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32); +int cxgbit_xmit_pdu(struct iscsit_conn *, struct iscsit_cmd *, + struct iscsi_datain_req *, const void *, u32); +void cxgbit_get_r2t_ttt(struct iscsit_conn *, struct iscsit_cmd *, + struct iscsi_r2t *); +u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *); +int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *); +void cxgbit_get_rx_pdu(struct iscsit_conn *); +int cxgbit_validate_params(struct iscsit_conn *); +struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *); + +/* DDP */ +int cxgbit_ddp_init(struct cxgbit_device *); +int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32); +int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsit_cmd *); +void cxgbit_unmap_cmd(struct iscsit_conn *, struct iscsit_cmd *); + +static inline +struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev) +{ + return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm); +} +#endif /* __CXGBIT_H__ */ diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c new file mode 100644 index 0000000000..d9204c590d --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -0,0 +1,2018 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + */ + +#include <linux/module.h> +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/timer.h> +#include <linux/notifier.h> +#include <linux/inetdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> + +#include <net/neighbour.h> +#include <net/netevent.h> +#include <net/route.h> +#include <net/tcp.h> +#include <net/ip6_route.h> +#include <net/addrconf.h> + +#include <libcxgb_cm.h> +#include "cxgbit.h" +#include "clip_tbl.h" + +static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp) +{ + wr_waitp->ret = 0; + reinit_completion(&wr_waitp->completion); +} + +static void +cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret) +{ + if (ret == CPL_ERR_NONE) + wr_waitp->ret = 0; + else + wr_waitp->ret = -EIO; + + if (wr_waitp->ret) + pr_err("%s: err:%u", func, ret); + + complete(&wr_waitp->completion); +} + +static int +cxgbit_wait_for_reply(struct cxgbit_device *cdev, + struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout, + const char *func) +{ + int ret; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { + wr_waitp->ret = -EIO; + goto out; + } + + ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ); + if (!ret) { + pr_info("%s - Device %s not responding tid %u\n", + func, pci_name(cdev->lldi.pdev), tid); + wr_waitp->ret = -ETIMEDOUT; + } +out: + if (wr_waitp->ret) + pr_info("%s: FW reply %d tid %u\n", + pci_name(cdev->lldi.pdev), wr_waitp->ret, tid); + return wr_waitp->ret; +} + +static int cxgbit_np_hashfn(const struct cxgbit_np *cnp) +{ + return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1); +} + +static struct np_info * +cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp, + unsigned int stid) +{ + struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL); + + if (p) { + int bucket = cxgbit_np_hashfn(cnp); + + p->cnp = cnp; + p->stid = stid; + spin_lock(&cdev->np_lock); + 
p->next = cdev->np_hash_tab[bucket]; + cdev->np_hash_tab[bucket] = p; + spin_unlock(&cdev->np_lock); + } + + return p; +} + +static int +cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid = -1, bucket = cxgbit_np_hashfn(cnp); + struct np_info *p; + + spin_lock(&cdev->np_lock); + for (p = cdev->np_hash_tab[bucket]; p; p = p->next) { + if (p->cnp == cnp) { + stid = p->stid; + break; + } + } + spin_unlock(&cdev->np_lock); + + return stid; +} + +static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid = -1, bucket = cxgbit_np_hashfn(cnp); + struct np_info *p, **prev = &cdev->np_hash_tab[bucket]; + + spin_lock(&cdev->np_lock); + for (p = *prev; p; prev = &p->next, p = p->next) { + if (p->cnp == cnp) { + stid = p->stid; + *prev = p->next; + kfree(p); + break; + } + } + spin_unlock(&cdev->np_lock); + + return stid; +} + +void _cxgbit_free_cnp(struct kref *kref) +{ + struct cxgbit_np *cnp; + + cnp = container_of(kref, struct cxgbit_np, kref); + kfree(cnp); +} + +static int +cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid, + struct cxgbit_np *cnp) +{ + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) + &cnp->com.local_addr; + int addr_type; + int ret; + + pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n", + __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port); + + addr_type = ipv6_addr_type((const struct in6_addr *) + &sin6->sin6_addr); + if (addr_type != IPV6_ADDR_ANY) { + ret = cxgb4_clip_get(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, 1); + if (ret) { + pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n", + sin6->sin6_addr.s6_addr, ret); + return -ENOMEM; + } + } + + cxgbit_get_cnp(cnp); + cxgbit_init_wr_wait(&cnp->com.wr_wait); + + ret = cxgb4_create_server6(cdev->lldi.ports[0], + stid, &sin6->sin6_addr, + sin6->sin6_port, + cdev->lldi.rxq_ids[0]); + if (!ret) + ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait, + 0, 10, __func__); + else if (ret > 0) + ret = net_xmit_errno(ret); + else + cxgbit_put_cnp(cnp); + + if (ret) { + if (ret != -ETIMEDOUT) + cxgb4_clip_release(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, 1); + + pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n", + ret, stid, sin6->sin6_addr.s6_addr, + ntohs(sin6->sin6_port)); + } + + return ret; +} + +static int +cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid, + struct cxgbit_np *cnp) +{ + struct sockaddr_in *sin = (struct sockaddr_in *) + &cnp->com.local_addr; + int ret; + + pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n", + __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port); + + cxgbit_get_cnp(cnp); + cxgbit_init_wr_wait(&cnp->com.wr_wait); + + ret = cxgb4_create_server(cdev->lldi.ports[0], + stid, sin->sin_addr.s_addr, + sin->sin_port, 0, + cdev->lldi.rxq_ids[0]); + if (!ret) + ret = cxgbit_wait_for_reply(cdev, + &cnp->com.wr_wait, + 0, 10, __func__); + else if (ret > 0) + ret = net_xmit_errno(ret); + else + cxgbit_put_cnp(cnp); + + if (ret) + pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n", + ret, stid, &sin->sin_addr, ntohs(sin->sin_port)); + return ret; +} + +struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id) +{ + struct cxgbit_device *cdev; + u8 i; + + list_for_each_entry(cdev, &cdev_list_head, list) { + struct cxgb4_lld_info *lldi = &cdev->lldi; + + for (i = 0; i < lldi->nports; i++) { + if (lldi->ports[i] == ndev) { + if (port_id) + *port_id = i; + return cdev; + } + } + } + + return 
NULL; +} + +static struct net_device *cxgbit_get_real_dev(struct net_device *ndev) +{ + if (ndev->priv_flags & IFF_BONDING) { + pr_err("Bond devices are not supported. Interface:%s\n", + ndev->name); + return NULL; + } + + if (is_vlan_dev(ndev)) + return vlan_dev_real_dev(ndev); + + return ndev; +} + +static struct net_device *cxgbit_ipv4_netdev(__be32 saddr) +{ + struct net_device *ndev; + + ndev = __ip_dev_find(&init_net, saddr, false); + if (!ndev) + return NULL; + + return cxgbit_get_real_dev(ndev); +} + +static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6) +{ + struct net_device *ndev = NULL; + bool found = false; + + if (IS_ENABLED(CONFIG_IPV6)) { + for_each_netdev_rcu(&init_net, ndev) + if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) { + found = true; + break; + } + } + if (!found) + return NULL; + return cxgbit_get_real_dev(ndev); +} + +static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp) +{ + struct sockaddr_storage *sockaddr = &cnp->com.local_addr; + int ss_family = sockaddr->ss_family; + struct net_device *ndev = NULL; + struct cxgbit_device *cdev = NULL; + + rcu_read_lock(); + if (ss_family == AF_INET) { + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)sockaddr; + ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr); + } else if (ss_family == AF_INET6) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)sockaddr; + ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr); + } + if (!ndev) + goto out; + + cdev = cxgbit_find_device(ndev, NULL); +out: + rcu_read_unlock(); + return cdev; +} + +static bool cxgbit_inaddr_any(struct cxgbit_np *cnp) +{ + struct sockaddr_storage *sockaddr = &cnp->com.local_addr; + int ss_family = sockaddr->ss_family; + int addr_type; + + if (ss_family == AF_INET) { + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)sockaddr; + if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) + return true; + } else if (ss_family == AF_INET6) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)sockaddr; + addr_type = ipv6_addr_type((const struct in6_addr *) + &sin6->sin6_addr); + if (addr_type == IPV6_ADDR_ANY) + return true; + } + return false; +} + +static int +__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid, ret; + int ss_family = cnp->com.local_addr.ss_family; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) + return -EINVAL; + + stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp); + if (stid < 0) + return -EINVAL; + + if (!cxgbit_np_hash_add(cdev, cnp, stid)) { + cxgb4_free_stid(cdev->lldi.tids, stid, ss_family); + return -EINVAL; + } + + if (ss_family == AF_INET) + ret = cxgbit_create_server4(cdev, stid, cnp); + else + ret = cxgbit_create_server6(cdev, stid, cnp); + + if (ret) { + if (ret != -ETIMEDOUT) + cxgb4_free_stid(cdev->lldi.tids, stid, + ss_family); + cxgbit_np_hash_del(cdev, cnp); + return ret; + } + return ret; +} + +static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + int ret = -1; + + mutex_lock(&cdev_list_lock); + cdev = cxgbit_find_np_cdev(cnp); + if (!cdev) + goto out; + + if (cxgbit_np_hash_find(cdev, cnp) >= 0) + goto out; + + if (__cxgbit_setup_cdev_np(cdev, cnp)) + goto out; + + cnp->com.cdev = cdev; + ret = 0; +out: + mutex_unlock(&cdev_list_lock); + return ret; +} + +static int cxgbit_setup_all_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + int ret; + u32 count = 0; + + mutex_lock(&cdev_list_lock); + list_for_each_entry(cdev, &cdev_list_head, list) { + if (cxgbit_np_hash_find(cdev, cnp) >= 0) { 
+ mutex_unlock(&cdev_list_lock); + return -1; + } + } + + list_for_each_entry(cdev, &cdev_list_head, list) { + ret = __cxgbit_setup_cdev_np(cdev, cnp); + if (ret == -ETIMEDOUT) + break; + if (ret != 0) + continue; + count++; + } + mutex_unlock(&cdev_list_lock); + + return count ? 0 : -1; +} + +int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr) +{ + struct cxgbit_np *cnp; + int ret; + + if ((ksockaddr->ss_family != AF_INET) && + (ksockaddr->ss_family != AF_INET6)) + return -EINVAL; + + cnp = kzalloc(sizeof(*cnp), GFP_KERNEL); + if (!cnp) + return -ENOMEM; + + init_waitqueue_head(&cnp->accept_wait); + init_completion(&cnp->com.wr_wait.completion); + init_completion(&cnp->accept_comp); + INIT_LIST_HEAD(&cnp->np_accept_list); + spin_lock_init(&cnp->np_accept_lock); + kref_init(&cnp->kref); + memcpy(&np->np_sockaddr, ksockaddr, + sizeof(struct sockaddr_storage)); + memcpy(&cnp->com.local_addr, &np->np_sockaddr, + sizeof(cnp->com.local_addr)); + + cnp->np = np; + cnp->com.cdev = NULL; + + if (cxgbit_inaddr_any(cnp)) + ret = cxgbit_setup_all_np(cnp); + else + ret = cxgbit_setup_cdev_np(cnp); + + if (ret) { + cxgbit_put_cnp(cnp); + return -EINVAL; + } + + np->np_context = cnp; + cnp->com.state = CSK_STATE_LISTEN; + return 0; +} + +static void +cxgbit_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn, + struct cxgbit_sock *csk) +{ + conn->login_family = np->np_sockaddr.ss_family; + conn->login_sockaddr = csk->com.remote_addr; + conn->local_sockaddr = csk->com.local_addr; +} + +int cxgbit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn) +{ + struct cxgbit_np *cnp = np->np_context; + struct cxgbit_sock *csk; + int ret = 0; + +accept_wait: + ret = wait_for_completion_interruptible(&cnp->accept_comp); + if (ret) + return -ENODEV; + + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { + spin_unlock_bh(&np->np_thread_lock); + /** + * No point in stalling here when np_thread + * is in state RESET/SHUTDOWN/EXIT - bail + **/ + return -ENODEV; + } + spin_unlock_bh(&np->np_thread_lock); + + spin_lock_bh(&cnp->np_accept_lock); + if (list_empty(&cnp->np_accept_list)) { + spin_unlock_bh(&cnp->np_accept_lock); + goto accept_wait; + } + + csk = list_first_entry(&cnp->np_accept_list, + struct cxgbit_sock, + accept_node); + + list_del_init(&csk->accept_node); + spin_unlock_bh(&cnp->np_accept_lock); + conn->context = csk; + csk->conn = conn; + + cxgbit_set_conn_info(np, conn, csk); + return 0; +} + +static int +__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp) +{ + int stid, ret; + bool ipv6 = false; + + stid = cxgbit_np_hash_del(cdev, cnp); + if (stid < 0) + return -EINVAL; + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) + return -EINVAL; + + if (cnp->np->np_sockaddr.ss_family == AF_INET6) + ipv6 = true; + + cxgbit_get_cnp(cnp); + cxgbit_init_wr_wait(&cnp->com.wr_wait); + ret = cxgb4_remove_server(cdev->lldi.ports[0], stid, + cdev->lldi.rxq_ids[0], ipv6); + + if (ret > 0) + ret = net_xmit_errno(ret); + + if (ret) { + cxgbit_put_cnp(cnp); + return ret; + } + + ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait, + 0, 10, __func__); + if (ret == -ETIMEDOUT) + return ret; + + if (ipv6 && cnp->com.cdev) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr; + cxgb4_clip_release(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, + 1); + } + + cxgb4_free_stid(cdev->lldi.tids, stid, + cnp->com.local_addr.ss_family); + return 0; +} + +static void cxgbit_free_all_np(struct 
cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + int ret; + + mutex_lock(&cdev_list_lock); + list_for_each_entry(cdev, &cdev_list_head, list) { + ret = __cxgbit_free_cdev_np(cdev, cnp); + if (ret == -ETIMEDOUT) + break; + } + mutex_unlock(&cdev_list_lock); +} + +static void cxgbit_free_cdev_np(struct cxgbit_np *cnp) +{ + struct cxgbit_device *cdev; + bool found = false; + + mutex_lock(&cdev_list_lock); + list_for_each_entry(cdev, &cdev_list_head, list) { + if (cdev == cnp->com.cdev) { + found = true; + break; + } + } + if (!found) + goto out; + + __cxgbit_free_cdev_np(cdev, cnp); +out: + mutex_unlock(&cdev_list_lock); +} + +static void __cxgbit_free_conn(struct cxgbit_sock *csk); + +void cxgbit_free_np(struct iscsi_np *np) +{ + struct cxgbit_np *cnp = np->np_context; + struct cxgbit_sock *csk, *tmp; + + cnp->com.state = CSK_STATE_DEAD; + if (cnp->com.cdev) + cxgbit_free_cdev_np(cnp); + else + cxgbit_free_all_np(cnp); + + spin_lock_bh(&cnp->np_accept_lock); + list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) { + list_del_init(&csk->accept_node); + __cxgbit_free_conn(csk); + } + spin_unlock_bh(&cnp->np_accept_lock); + + np->np_context = NULL; + cxgbit_put_cnp(cnp); +} + +static void cxgbit_send_halfclose(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + u32 len = roundup(sizeof(struct cpl_close_con_req), 16); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx, + NULL, NULL); + + cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL; + __skb_queue_tail(&csk->txq, skb); + cxgbit_push_tx_frames(csk); +} + +static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb) +{ + struct cxgbit_sock *csk = handle; + + pr_debug("%s cxgbit_device %p\n", __func__, handle); + kfree_skb(skb); + cxgbit_put_csk(csk); +} + +static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb) +{ + struct cxgbit_device *cdev = handle; + struct cpl_abort_req *req = cplhdr(skb); + + pr_debug("%s cdev %p\n", __func__, cdev); + req->cmd = CPL_ABORT_NO_RST; + cxgbit_ofld_send(cdev, skb); +} + +static int cxgbit_send_abort_req(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + u32 len = roundup(sizeof(struct cpl_abort_req), 16); + + pr_debug("%s: csk %p tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + __skb_queue_purge(&csk->txq); + + if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) + cxgbit_send_tx_flowc_wr(csk); + + skb = __skb_dequeue(&csk->skbq); + cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx, + csk->com.cdev, cxgbit_abort_arp_failure); + + return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); +} + +static void +__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + __kfree_skb(skb); + + if (csk->com.state != CSK_STATE_ESTABLISHED) + goto no_abort; + + set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags); + csk->com.state = CSK_STATE_ABORTING; + + cxgbit_send_abort_req(csk); + + return; + +no_abort: + cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE); + cxgbit_put_csk(csk); +} + +void cxgbit_abort_conn(struct cxgbit_sock *csk) +{ + struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL); + + cxgbit_get_csk(csk); + cxgbit_init_wr_wait(&csk->com.wr_wait); + + spin_lock_bh(&csk->lock); + if (csk->lock_owner) { + cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn; + __skb_queue_tail(&csk->backlogq, skb); + } else { + __cxgbit_abort_conn(csk, skb); + } + spin_unlock_bh(&csk->lock); + + cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait, + csk->tid, 600, 
__func__); +} + +static void __cxgbit_free_conn(struct cxgbit_sock *csk) +{ + struct iscsit_conn *conn = csk->conn; + bool release = false; + + pr_debug("%s: state %d\n", + __func__, csk->com.state); + + spin_lock_bh(&csk->lock); + switch (csk->com.state) { + case CSK_STATE_ESTABLISHED: + if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) { + csk->com.state = CSK_STATE_CLOSING; + cxgbit_send_halfclose(csk); + } else { + csk->com.state = CSK_STATE_ABORTING; + cxgbit_send_abort_req(csk); + } + break; + case CSK_STATE_CLOSING: + csk->com.state = CSK_STATE_MORIBUND; + cxgbit_send_halfclose(csk); + break; + case CSK_STATE_DEAD: + release = true; + break; + default: + pr_err("%s: csk %p; state %d\n", + __func__, csk, csk->com.state); + } + spin_unlock_bh(&csk->lock); + + if (release) + cxgbit_put_csk(csk); +} + +void cxgbit_free_conn(struct iscsit_conn *conn) +{ + __cxgbit_free_conn(conn->context); +} + +static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt) +{ + csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] - + ((csk->com.remote_addr.ss_family == AF_INET) ? + sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - + sizeof(struct tcphdr); + csk->mss = csk->emss; + if (TCPOPT_TSTAMP_G(opt)) + csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4); + if (csk->emss < 128) + csk->emss = 128; + if (csk->emss & 7) + pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n", + TCPOPT_MSS_G(opt), csk->mss, csk->emss); + pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt), + csk->mss, csk->emss); +} + +static void cxgbit_free_skb(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + + __skb_queue_purge(&csk->txq); + __skb_queue_purge(&csk->rxq); + __skb_queue_purge(&csk->backlogq); + __skb_queue_purge(&csk->ppodq); + __skb_queue_purge(&csk->skbq); + + while ((skb = cxgbit_sock_dequeue_wr(csk))) + kfree_skb(skb); + + __kfree_skb(csk->lro_hskb); +} + +void _cxgbit_free_csk(struct kref *kref) +{ + struct cxgbit_sock *csk; + struct cxgbit_device *cdev; + + csk = container_of(kref, struct cxgbit_sock, kref); + + pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state); + + if (csk->com.local_addr.ss_family == AF_INET6) { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) + &csk->com.local_addr; + cxgb4_clip_release(csk->com.cdev->lldi.ports[0], + (const u32 *) + &sin6->sin6_addr.s6_addr, 1); + } + + cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid, + csk->com.local_addr.ss_family); + dst_release(csk->dst); + cxgb4_l2t_release(csk->l2t); + + cdev = csk->com.cdev; + spin_lock_bh(&cdev->cskq.lock); + list_del(&csk->list); + spin_unlock_bh(&cdev->cskq.lock); + + cxgbit_free_skb(csk); + cxgbit_put_cnp(csk->cnp); + cxgbit_put_cdev(cdev); + + kfree(csk); +} + +static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi) +{ + unsigned int linkspeed; + u8 scale; + + linkspeed = pi->link_cfg.speed; + scale = linkspeed / SPEED_10000; + +#define CXGBIT_10G_RCV_WIN (256 * 1024) + csk->rcv_win = CXGBIT_10G_RCV_WIN; + if (scale) + csk->rcv_win *= scale; + csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10); + +#define CXGBIT_10G_SND_WIN (256 * 1024) + csk->snd_win = CXGBIT_10G_SND_WIN; + if (scale) + csk->snd_win *= scale; + csk->snd_win = min(csk->snd_win, 512U * 1024); + + pr_debug("%s snd_win %d rcv_win %d\n", + __func__, csk->snd_win, csk->rcv_win); +} + +#ifdef CONFIG_CHELSIO_T4_DCB +static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev) +{ + return ndev->dcbnl_ops->getstate(ndev); +} + +static int cxgbit_select_priority(int pri_mask) +{ + if 
(!pri_mask) + return 0; + + return (ffs(pri_mask) - 1); +} + +static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port) +{ + int ret; + u8 caps; + + struct dcb_app iscsi_dcb_app = { + .protocol = local_port + }; + + ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps); + + if (ret) + return 0; + + if (caps & DCB_CAP_DCBX_VER_IEEE) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM; + ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + if (!ret) { + iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; + ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); + } + } else if (caps & DCB_CAP_DCBX_VER_CEE) { + iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; + + ret = dcb_getapp(ndev, &iscsi_dcb_app); + } + + pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret)); + + return cxgbit_select_priority(ret); +} +#endif + +static int +cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, + u16 local_port, struct dst_entry *dst, + struct cxgbit_device *cdev) +{ + struct neighbour *n; + int ret, step; + struct net_device *ndev; + u16 rxq_idx, port_id; +#ifdef CONFIG_CHELSIO_T4_DCB + u8 priority = 0; +#endif + + n = dst_neigh_lookup(dst, peer_ip); + if (!n) + return -ENODEV; + + rcu_read_lock(); + if (!(n->nud_state & NUD_VALID)) + neigh_event_send(n, NULL); + + ret = -ENOMEM; + if (n->dev->flags & IFF_LOOPBACK) { + if (iptype == 4) + ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip); + else if (IS_ENABLED(CONFIG_IPV6)) + ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip); + else + ndev = NULL; + + if (!ndev) { + ret = -ENODEV; + goto out; + } + + csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, + n, ndev, 0); + if (!csk->l2t) + goto out; + csk->mtu = ndev->mtu; + csk->tx_chan = cxgb4_port_chan(ndev); + csk->smac_idx = + ((struct port_info *)netdev_priv(ndev))->smt_idx; + step = cdev->lldi.ntxq / + cdev->lldi.nchan; + csk->txq_idx = cxgb4_port_idx(ndev) * step; + step = cdev->lldi.nrxq / + cdev->lldi.nchan; + csk->ctrlq_idx = cxgb4_port_idx(ndev); + csk->rss_qid = cdev->lldi.rxq_ids[ + cxgb4_port_idx(ndev) * step]; + csk->port_id = cxgb4_port_idx(ndev); + cxgbit_set_tcp_window(csk, + (struct port_info *)netdev_priv(ndev)); + } else { + ndev = cxgbit_get_real_dev(n->dev); + if (!ndev) { + ret = -ENODEV; + goto out; + } + +#ifdef CONFIG_CHELSIO_T4_DCB + if (cxgbit_get_iscsi_dcb_state(ndev)) + priority = cxgbit_get_iscsi_dcb_priority(ndev, + local_port); + + csk->dcb_priority = priority; + + csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority); +#else + csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0); +#endif + if (!csk->l2t) + goto out; + port_id = cxgb4_port_idx(ndev); + csk->mtu = dst_mtu(dst); + csk->tx_chan = cxgb4_port_chan(ndev); + csk->smac_idx = + ((struct port_info *)netdev_priv(ndev))->smt_idx; + step = cdev->lldi.ntxq / + cdev->lldi.nports; + csk->txq_idx = (port_id * step) + + (cdev->selectq[port_id][0]++ % step); + csk->ctrlq_idx = cxgb4_port_idx(ndev); + step = cdev->lldi.nrxq / + cdev->lldi.nports; + rxq_idx = (port_id * step) + + (cdev->selectq[port_id][1]++ % step); + csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx]; + csk->port_id = port_id; + cxgbit_set_tcp_window(csk, + (struct port_info *)netdev_priv(ndev)); + } + ret = 0; +out: + rcu_read_unlock(); + neigh_release(n); + return ret; +} + +int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + int ret = 0; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { + kfree_skb(skb); + pr_err("%s - device not up - dropping\n", __func__); + return -EIO; + } + + 
ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb); + if (ret < 0) + kfree_skb(skb); + return ret < 0 ? ret : 0; +} + +static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid) +{ + u32 len = roundup(sizeof(struct cpl_tid_release), 16); + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + cxgb_mk_tid_release(skb, len, tid, 0); + cxgbit_ofld_send(cdev, skb); +} + +int +cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb, + struct l2t_entry *l2e) +{ + int ret = 0; + + if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { + kfree_skb(skb); + pr_err("%s - device not up - dropping\n", __func__); + return -EIO; + } + + ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e); + if (ret < 0) + kfree_skb(skb); + return ret < 0 ? ret : 0; +} + +static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + if (csk->com.state != CSK_STATE_ESTABLISHED) { + __kfree_skb(skb); + return; + } + + cxgbit_ofld_send(csk->com.cdev, skb); +} + +/* + * CPL connection rx data ack: host -> + * Send RX credits through an RX_DATA_ACK CPL message. + * Returns the number of credits sent. + */ +int cxgbit_rx_data_ack(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16); + u32 credit_dack; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -1; + + credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) | + RX_CREDITS_V(csk->rx_credits); + + cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx, + credit_dack); + + csk->rx_credits = 0; + + spin_lock_bh(&csk->lock); + if (csk->lock_owner) { + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits; + __skb_queue_tail(&csk->backlogq, skb); + spin_unlock_bh(&csk->lock); + return 0; + } + + cxgbit_send_rx_credits(csk, skb); + spin_unlock_bh(&csk->lock); + + return 0; +} + +#define FLOWC_WR_NPARAMS_MIN 9 +#define FLOWC_WR_NPARAMS_MAX 11 +static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + u32 len, flowclen; + u8 i; + + flowclen = offsetof(struct fw_flowc_wr, + mnemval[FLOWC_WR_NPARAMS_MAX]); + + len = max_t(u32, sizeof(struct cpl_abort_req), + sizeof(struct cpl_abort_rpl)); + + len = max(len, flowclen); + len = roundup(len, 16); + + for (i = 0; i < 3; i++) { + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + goto out; + __skb_queue_tail(&csk->skbq, skb); + } + + skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC); + if (!skb) + goto out; + + memset(skb->data, 0, LRO_SKB_MIN_HEADROOM); + csk->lro_hskb = skb; + + return 0; +out: + __skb_queue_purge(&csk->skbq); + return -ENOMEM; +} + +static void +cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) +{ + struct sk_buff *skb; + const struct tcphdr *tcph; + struct cpl_t5_pass_accept_rpl *rpl5; + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; + unsigned int len = roundup(sizeof(*rpl5), 16); + unsigned int mtu_idx; + u64 opt0; + u32 opt2, hlen; + u32 wscale; + u32 win; + + pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + cxgbit_put_csk(csk); + return; + } + + rpl5 = __skb_put_zero(skb, len); + + INIT_TP_WR(rpl5, csk->tid); + OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, + csk->tid)); + cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx, + req->tcpopt.tstamp, + (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1); + wscale = cxgb_compute_wscale(csk->rcv_win); + /* + * Specify the largest window that will fit in opt0. 
The + * remainder will be specified in the rx_data_ack. + */ + win = csk->rcv_win >> 10; + if (win > RCV_BUFSIZ_M) + win = RCV_BUFSIZ_M; + opt0 = TCAM_BYPASS_F | + WND_SCALE_V(wscale) | + MSS_IDX_V(mtu_idx) | + L2T_IDX_V(csk->l2t->idx) | + TX_CHAN_V(csk->tx_chan) | + SMAC_SEL_V(csk->smac_idx) | + DSCP_V(csk->tos >> 2) | + ULP_MODE_V(ULP_MODE_ISCSI) | + RCV_BUFSIZ_V(win); + + opt2 = RX_CHANNEL_V(0) | + RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); + + if (!is_t5(lldi->adapter_type)) + opt2 |= RX_FC_DISABLE_F; + + if (req->tcpopt.tstamp) + opt2 |= TSTAMPS_EN_F; + if (req->tcpopt.sack) + opt2 |= SACK_EN_F; + if (wscale) + opt2 |= WND_SCALE_EN_F; + + hlen = ntohl(req->hdr_len); + + if (is_t5(lldi->adapter_type)) + tcph = (struct tcphdr *)((u8 *)(req + 1) + + ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen)); + else + tcph = (struct tcphdr *)((u8 *)(req + 1) + + T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen)); + + if (tcph->ece && tcph->cwr) + opt2 |= CCTRL_ECN_V(1); + + opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); + + opt2 |= T5_ISS_F; + rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1); + + opt2 |= T5_OPT_2_VALID_F; + + rpl5->opt0 = cpu_to_be64(opt0); + rpl5->opt2 = cpu_to_be32(opt2); + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx); + t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard); + cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); +} + +static void +cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk = NULL; + struct cxgbit_np *cnp; + struct cpl_pass_accept_req *req = cplhdr(skb); + unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); + struct tid_info *t = cdev->lldi.tids; + unsigned int tid = GET_TID(req); + u16 peer_mss = ntohs(req->tcpopt.mss); + unsigned short hdrs; + + struct dst_entry *dst; + __u8 local_ip[16], peer_ip[16]; + __be16 local_port, peer_port; + int ret; + int iptype; + + pr_debug("%s: cdev = %p; stid = %u; tid = %u\n", + __func__, cdev, stid, tid); + + cnp = lookup_stid(t, stid); + if (!cnp) { + pr_err("%s connect request on invalid stid %d\n", + __func__, stid); + goto rel_skb; + } + + if (cnp->com.state != CSK_STATE_LISTEN) { + pr_err("%s - listening parent not in CSK_STATE_LISTEN\n", + __func__); + goto reject; + } + + csk = lookup_tid(t, tid); + if (csk) { + pr_err("%s csk not null tid %u\n", + __func__, tid); + goto rel_skb; + } + + cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip, + peer_ip, &local_port, &peer_port); + + /* Find output route */ + if (iptype == 4) { + pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 " + "lport %d rport %d peer_mss %d\n" + , __func__, cnp, tid, + local_ip, peer_ip, ntohs(local_port), + ntohs(peer_port), peer_mss); + dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev, + *(__be32 *)local_ip, + *(__be32 *)peer_ip, + local_port, peer_port, + PASS_OPEN_TOS_G(ntohl(req->tos_stid))); + } else { + pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 " + "lport %d rport %d peer_mss %d\n" + , __func__, cnp, tid, + local_ip, peer_ip, ntohs(local_port), + ntohs(peer_port), peer_mss); + dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev, + local_ip, peer_ip, + local_port, peer_port, + PASS_OPEN_TOS_G(ntohl(req->tos_stid)), + ((struct sockaddr_in6 *) + &cnp->com.local_addr)->sin6_scope_id); + } + if (!dst) { + pr_err("%s - failed to find dst entry!\n", + __func__); + goto reject; + } + + csk = kzalloc(sizeof(*csk), GFP_ATOMIC); + if (!csk) { + dst_release(dst); + goto rel_skb; + } + + ret = cxgbit_offload_init(csk, iptype, peer_ip, 
ntohs(local_port), + dst, cdev); + if (ret) { + pr_err("%s - failed to allocate l2t entry!\n", + __func__); + dst_release(dst); + kfree(csk); + goto reject; + } + + kref_init(&csk->kref); + init_completion(&csk->com.wr_wait.completion); + + INIT_LIST_HEAD(&csk->accept_node); + + hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0); + if (peer_mss && csk->mtu > (peer_mss + hdrs)) + csk->mtu = peer_mss + hdrs; + + csk->com.state = CSK_STATE_CONNECTING; + csk->com.cdev = cdev; + csk->cnp = cnp; + csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); + csk->dst = dst; + csk->tid = tid; + csk->wr_cred = cdev->lldi.wr_cred - + DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); + csk->wr_max_cred = csk->wr_cred; + csk->wr_una_cred = 0; + + if (iptype == 4) { + struct sockaddr_in *sin = (struct sockaddr_in *) + &csk->com.local_addr; + sin->sin_family = AF_INET; + sin->sin_port = local_port; + sin->sin_addr.s_addr = *(__be32 *)local_ip; + + sin = (struct sockaddr_in *)&csk->com.remote_addr; + sin->sin_family = AF_INET; + sin->sin_port = peer_port; + sin->sin_addr.s_addr = *(__be32 *)peer_ip; + } else { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) + &csk->com.local_addr; + + sin6->sin6_family = PF_INET6; + sin6->sin6_port = local_port; + memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); + cxgb4_clip_get(cdev->lldi.ports[0], + (const u32 *)&sin6->sin6_addr.s6_addr, + 1); + + sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr; + sin6->sin6_family = PF_INET6; + sin6->sin6_port = peer_port; + memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); + } + + skb_queue_head_init(&csk->rxq); + skb_queue_head_init(&csk->txq); + skb_queue_head_init(&csk->ppodq); + skb_queue_head_init(&csk->backlogq); + skb_queue_head_init(&csk->skbq); + cxgbit_sock_reset_wr_list(csk); + spin_lock_init(&csk->lock); + init_waitqueue_head(&csk->waitq); + csk->lock_owner = false; + + if (cxgbit_alloc_csk_skb(csk)) { + dst_release(dst); + kfree(csk); + goto rel_skb; + } + + cxgbit_get_cnp(cnp); + cxgbit_get_cdev(cdev); + + spin_lock(&cdev->cskq.lock); + list_add_tail(&csk->list, &cdev->cskq.list); + spin_unlock(&cdev->cskq.lock); + cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family); + cxgbit_pass_accept_rpl(csk, req); + goto rel_skb; + +reject: + cxgbit_release_tid(cdev, tid); +rel_skb: + __kfree_skb(skb); +} + +static u32 +cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp, + u32 *flowclenp) +{ + u32 nparams, flowclen16, flowclen; + + nparams = FLOWC_WR_NPARAMS_MIN; + + if (csk->snd_wscale) + nparams++; + +#ifdef CONFIG_CHELSIO_T4_DCB + nparams++; +#endif + flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); + flowclen16 = DIV_ROUND_UP(flowclen, 16); + flowclen = flowclen16 * 16; + /* + * Return the number of 16-byte credits used by the flowc request. + * Pass back the nparams and actual flowc length if requested. 
+ */ + if (nparamsp) + *nparamsp = nparams; + if (flowclenp) + *flowclenp = flowclen; + return flowclen16; +} + +u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk) +{ + struct cxgbit_device *cdev = csk->com.cdev; + struct fw_flowc_wr *flowc; + u32 nparams, flowclen16, flowclen; + struct sk_buff *skb; + u8 index; + +#ifdef CONFIG_CHELSIO_T4_DCB + u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; +#endif + + flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen); + + skb = __skb_dequeue(&csk->skbq); + flowc = __skb_put_zero(skb, flowclen); + + flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | + FW_FLOWC_WR_NPARAMS_V(nparams)); + flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) | + FW_WR_FLOWID_V(csk->tid)); + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; + flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V + (csk->com.cdev->lldi.pf)); + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; + flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan); + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; + flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan); + flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; + flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid); + flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; + flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt); + flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; + flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt); + flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; + flowc->mnemval[6].val = cpu_to_be32(csk->snd_win); + flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; + flowc->mnemval[7].val = cpu_to_be32(csk->emss); + + flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) + flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD); + else + flowc->mnemval[8].val = cpu_to_be32(16384); + + index = 9; + + if (csk->snd_wscale) { + flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE; + flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale); + index++; + } + +#ifdef CONFIG_CHELSIO_T4_DCB + flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO; + if (vlan == VLAN_NONE) { + pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid); + flowc->mnemval[index].val = cpu_to_be32(0); + } else + flowc->mnemval[index].val = cpu_to_be32( + (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT); +#endif + + pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;" + " rcv_seq = %u; snd_win = %u; emss = %u\n", + __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt, + csk->rcv_nxt, csk->snd_win, csk->emss); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + cxgbit_ofld_send(csk->com.cdev, skb); + return flowclen16; +} + +static int +cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + spin_lock_bh(&csk->lock); + if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) { + spin_unlock_bh(&csk->lock); + pr_err("%s: csk 0x%p, tid %u, state %u\n", + __func__, csk, csk->tid, csk->com.state); + __kfree_skb(skb); + return -1; + } + + cxgbit_get_csk(csk); + cxgbit_init_wr_wait(&csk->com.wr_wait); + cxgbit_ofld_send(csk->com.cdev, skb); + spin_unlock_bh(&csk->lock); + + return 0; +} + +int cxgbit_setup_conn_digest(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC; + u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC; + unsigned int len = roundup(sizeof(*req), 16); + int ret; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* set up ulp submode */ + req = 
__skb_put_zero(skb, len); + + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 4); + req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | + (dcrc ? ULP_CRC_DATA : 0)) << 4); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); + + if (cxgbit_send_tcb_skb(csk, skb)) + return -1; + + ret = cxgbit_wait_for_reply(csk->com.cdev, + &csk->com.wr_wait, + csk->tid, 5, __func__); + if (ret) + return -1; + + return 0; +} + +int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + unsigned int len = roundup(sizeof(*req), 16); + int ret; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + req = __skb_put_zero(skb, len); + + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 8); + req->val = cpu_to_be64(pg_idx << 8); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); + + if (cxgbit_send_tcb_skb(csk, skb)) + return -1; + + ret = cxgbit_wait_for_reply(csk->com.cdev, + &csk->com.wr_wait, + csk->tid, 5, __func__); + if (ret) + return -1; + + return 0; +} + +static void +cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cpl_pass_open_rpl *rpl = cplhdr(skb); + struct tid_info *t = cdev->lldi.tids; + unsigned int stid = GET_TID(rpl); + struct cxgbit_np *cnp = lookup_stid(t, stid); + + pr_debug("%s: cnp = %p; stid = %u; status = %d\n", + __func__, cnp, stid, rpl->status); + + if (!cnp) { + pr_info("%s stid %d lookup failure\n", __func__, stid); + goto rel_skb; + } + + cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); + cxgbit_put_cnp(cnp); +rel_skb: + __kfree_skb(skb); +} + +static void +cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); + struct tid_info *t = cdev->lldi.tids; + unsigned int stid = GET_TID(rpl); + struct cxgbit_np *cnp = lookup_stid(t, stid); + + pr_debug("%s: cnp = %p; stid = %u; status = %d\n", + __func__, cnp, stid, rpl->status); + + if (!cnp) { + pr_info("%s stid %d lookup failure\n", __func__, stid); + goto rel_skb; + } + + cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); + cxgbit_put_cnp(cnp); +rel_skb: + __kfree_skb(skb); +} + +static void +cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cpl_pass_establish *req = cplhdr(skb); + struct tid_info *t = cdev->lldi.tids; + unsigned int tid = GET_TID(req); + struct cxgbit_sock *csk; + struct cxgbit_np *cnp; + u16 tcp_opt = be16_to_cpu(req->tcp_opt); + u32 snd_isn = be32_to_cpu(req->snd_isn); + u32 rcv_isn = be32_to_cpu(req->rcv_isn); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + cnp = csk->cnp; + + pr_debug("%s: csk %p; tid %u; cnp %p\n", + __func__, csk, tid, cnp); + + csk->write_seq = snd_isn; + csk->snd_una = snd_isn; + csk->snd_nxt = snd_isn; + + csk->rcv_nxt = rcv_isn; + + csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); + cxgbit_set_emss(csk, tcp_opt); + dst_confirm(csk->dst); + csk->com.state = CSK_STATE_ESTABLISHED; + spin_lock_bh(&cnp->np_accept_lock); + list_add_tail(&csk->accept_node, &cnp->np_accept_list); + 
spin_unlock_bh(&cnp->np_accept_lock); + complete(&cnp->accept_comp); +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + cxgbit_skcb_flags(skb) = 0; + spin_lock_bh(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + spin_unlock_bh(&csk->rxq.lock); + wake_up(&csk->waitq); +} + +static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + switch (csk->com.state) { + case CSK_STATE_ESTABLISHED: + csk->com.state = CSK_STATE_CLOSING; + cxgbit_queue_rx_skb(csk, skb); + return; + case CSK_STATE_CLOSING: + /* simultaneous close */ + csk->com.state = CSK_STATE_MORIBUND; + break; + case CSK_STATE_MORIBUND: + csk->com.state = CSK_STATE_DEAD; + cxgbit_put_csk(csk); + break; + case CSK_STATE_ABORTING: + break; + default: + pr_info("%s: cpl_peer_close in bad state %d\n", + __func__, csk->com.state); + } + + __kfree_skb(skb); +} + +static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + switch (csk->com.state) { + case CSK_STATE_CLOSING: + csk->com.state = CSK_STATE_MORIBUND; + break; + case CSK_STATE_MORIBUND: + csk->com.state = CSK_STATE_DEAD; + cxgbit_put_csk(csk); + break; + case CSK_STATE_ABORTING: + case CSK_STATE_DEAD: + break; + default: + pr_info("%s: cpl_close_con_rpl in bad state %d\n", + __func__, csk->com.state); + } + + __kfree_skb(skb); +} + +static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cpl_abort_req_rss *hdr = cplhdr(skb); + unsigned int tid = GET_TID(hdr); + struct sk_buff *rpl_skb; + bool release = false; + bool wakeup_thread = false; + u32 len = roundup(sizeof(struct cpl_abort_rpl), 16); + + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, tid, csk->com.state); + + if (cxgb_is_neg_adv(hdr->status)) { + pr_err("%s: got neg advise %d on tid %u\n", + __func__, hdr->status, tid); + goto rel_skb; + } + + switch (csk->com.state) { + case CSK_STATE_CONNECTING: + case CSK_STATE_MORIBUND: + csk->com.state = CSK_STATE_DEAD; + release = true; + break; + case CSK_STATE_ESTABLISHED: + csk->com.state = CSK_STATE_DEAD; + wakeup_thread = true; + break; + case CSK_STATE_CLOSING: + csk->com.state = CSK_STATE_DEAD; + if (!csk->conn) + release = true; + break; + case CSK_STATE_ABORTING: + break; + default: + pr_info("%s: cpl_abort_req_rss in bad state %d\n", + __func__, csk->com.state); + csk->com.state = CSK_STATE_DEAD; + } + + __skb_queue_purge(&csk->txq); + + if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) + cxgbit_send_tx_flowc_wr(csk); + + rpl_skb = __skb_dequeue(&csk->skbq); + + cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx); + cxgbit_ofld_send(csk->com.cdev, rpl_skb); + + if (wakeup_thread) { + cxgbit_queue_rx_skb(csk, skb); + return; + } + + if (release) + cxgbit_put_csk(csk); +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cpl_abort_rpl_rss *rpl = cplhdr(skb); + + pr_debug("%s: csk %p; tid %u; state %d\n", + __func__, csk, csk->tid, csk->com.state); + + switch (csk->com.state) { + case CSK_STATE_ABORTING: + csk->com.state = CSK_STATE_DEAD; + if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags)) + cxgbit_wake_up(&csk->com.wr_wait, __func__, + rpl->status); + cxgbit_put_csk(csk); + break; + default: + pr_info("%s: cpl_abort_rpl_rss in state %d\n", + 
__func__, csk->com.state); + } + + __kfree_skb(skb); +} + +static bool cxgbit_credit_err(const struct cxgbit_sock *csk) +{ + const struct sk_buff *skb = csk->wr_pending_head; + u32 credit = 0; + + if (unlikely(csk->wr_cred > csk->wr_max_cred)) { + pr_err("csk 0x%p, tid %u, credit %u > %u\n", + csk, csk->tid, csk->wr_cred, csk->wr_max_cred); + return true; + } + + while (skb) { + credit += (__force u32)skb->csum; + skb = cxgbit_skcb_tx_wr_next(skb); + } + + if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) { + pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", + csk, csk->tid, csk->wr_cred, + credit, csk->wr_max_cred); + + return true; + } + + return false; +} + +static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb); + u32 credits = rpl->credits; + u32 snd_una = ntohl(rpl->snd_una); + + csk->wr_cred += credits; + if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred)) + csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; + + while (credits) { + struct sk_buff *p = cxgbit_sock_peek_wr(csk); + u32 csum; + + if (unlikely(!p)) { + pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", + csk, csk->tid, credits, + csk->wr_cred, csk->wr_una_cred); + break; + } + + csum = (__force u32)p->csum; + if (unlikely(credits < csum)) { + pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", + csk, csk->tid, + credits, csk->wr_cred, csk->wr_una_cred, + csum); + p->csum = (__force __wsum)(csum - credits); + break; + } + + cxgbit_sock_dequeue_wr(csk); + credits -= csum; + kfree_skb(p); + } + + if (unlikely(cxgbit_credit_err(csk))) { + cxgbit_queue_rx_skb(csk, skb); + return; + } + + if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) { + if (unlikely(before(snd_una, csk->snd_una))) { + pr_warn("csk 0x%p,%u, snd_una %u/%u.", + csk, csk->tid, snd_una, + csk->snd_una); + goto rel_skb; + } + + if (csk->snd_una != snd_una) { + csk->snd_una = snd_una; + dst_confirm(csk->dst); + } + } + + if (skb_queue_len(&csk->txq)) + cxgbit_push_tx_frames(csk); + +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk; + struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } else { + cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); + } + + cxgbit_put_csk(csk); +rel_skb: + __kfree_skb(skb); +} + +static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk; + struct cpl_rx_data *cpl = cplhdr(skb); + unsigned int tid = GET_TID(cpl); + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. 
for tid %u.\n", tid); + goto rel_skb; + } + + cxgbit_queue_rx_skb(csk, skb); + return; +rel_skb: + __kfree_skb(skb); +} + +static void +__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + spin_lock(&csk->lock); + if (csk->lock_owner) { + __skb_queue_tail(&csk->backlogq, skb); + spin_unlock(&csk->lock); + return; + } + + cxgbit_skcb_rx_backlog_fn(skb)(csk, skb); + spin_unlock(&csk->lock); +} + +static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + cxgbit_get_csk(csk); + __cxgbit_process_rx_cpl(csk, skb); + cxgbit_put_csk(csk); +} + +static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb) +{ + struct cxgbit_sock *csk; + struct cpl_tx_data *cpl = cplhdr(skb); + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct tid_info *t = lldi->tids; + unsigned int tid = GET_TID(cpl); + u8 opcode = cxgbit_skcb_rx_opcode(skb); + bool ref = true; + + switch (opcode) { + case CPL_FW4_ACK: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack; + ref = false; + break; + case CPL_PEER_CLOSE: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close; + break; + case CPL_CLOSE_CON_RPL: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl; + break; + case CPL_ABORT_REQ_RSS: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss; + break; + case CPL_ABORT_RPL_RSS: + cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss; + break; + default: + goto rel_skb; + } + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. for tid %u.\n", tid); + goto rel_skb; + } + + if (ref) + cxgbit_process_rx_cpl(csk, skb); + else + __cxgbit_process_rx_cpl(csk, skb); + + return; +rel_skb: + __kfree_skb(skb); +} + +cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = { + [CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl, + [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl, + [CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req, + [CPL_PASS_ESTABLISH] = cxgbit_pass_establish, + [CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl, + [CPL_RX_DATA] = cxgbit_rx_data, + [CPL_FW4_ACK] = cxgbit_rx_cpl, + [CPL_PEER_CLOSE] = cxgbit_rx_cpl, + [CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl, + [CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl, + [CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl, +}; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c new file mode 100644 index 0000000000..17fd0d8cc4 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + */ + +#include "cxgbit.h" + +static void +cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod, + struct cxgbi_task_tag_info *ttinfo, + struct scatterlist **sg_pp, unsigned int *sg_off) +{ + struct scatterlist *sg = sg_pp ? *sg_pp : NULL; + unsigned int offset = sg_off ? 
*sg_off : 0; + dma_addr_t addr = 0UL; + unsigned int len = 0; + int i; + + memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); + + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + + for (i = 0; i < PPOD_PAGES_MAX; i++) { + if (sg) { + ppod->addr[i] = cpu_to_be64(addr + offset); + offset += PAGE_SIZE; + if (offset == (len + sg->offset)) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } + } + } else { + ppod->addr[i] = 0ULL; + } + } + + /* + * the fifth address needs to be repeated in the next ppod, so do + * not move sg + */ + if (sg_pp) { + *sg_pp = sg; + *sg_off = offset; + } + + if (offset == len) { + offset = 0; + if (sg) { + sg = sg_next(sg); + if (sg) + addr = sg_dma_address(sg); + } + } + ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; +} + +static struct sk_buff * +cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm, + unsigned int idx, unsigned int npods, unsigned int tid) +{ + struct ulp_mem_io *req; + struct ulptx_idata *idata; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int dlen = npods << PPOD_SIZE_SHIFT; + unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + + sizeof(struct ulptx_idata) + dlen, 16); + struct sk_buff *skb; + + skb = alloc_skb(wr_len, GFP_KERNEL); + if (!skb) + return NULL; + + req = __skb_put(skb, wr_len); + INIT_ULPTX_WR(req, wr_len, 0, tid); + req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | + FW_WR_ATOMIC_V(0)); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + ULP_MEMIO_ORDER_V(0) | + T5_ULP_MEMIO_IMM_V(1)); + req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); + req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); + req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); + + idata = (struct ulptx_idata *)(req + 1); + idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); + idata->len = htonl(dlen); + + return skb; +} + +static int +cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk, + struct cxgbi_task_tag_info *ttinfo, unsigned int idx, + unsigned int npods, struct scatterlist **sg_pp, + unsigned int *sg_off) +{ + struct cxgbit_device *cdev = csk->com.cdev; + struct sk_buff *skb; + struct ulp_mem_io *req; + struct ulptx_idata *idata; + struct cxgbi_pagepod *ppod; + unsigned int i; + + skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid); + if (!skb) + return -ENOMEM; + + req = (struct ulp_mem_io *)skb->data; + idata = (struct ulptx_idata *)(req + 1); + ppod = (struct cxgbi_pagepod *)(idata + 1); + + for (i = 0; i < npods; i++, ppod++) + cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); + + __skb_queue_tail(&csk->ppodq, skb); + + return 0; +} + +static int +cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int pidx = ttinfo->idx; + unsigned int npods = ttinfo->npods; + unsigned int i, cnt; + struct scatterlist *sg = ttinfo->sgl; + unsigned int offset = 0; + int ret = 0; + + for (i = 0; i < npods; i += cnt, pidx += cnt) { + cnt = npods - i; + + if (cnt > ULPMEM_IDATA_MAX_NPPODS) + cnt = ULPMEM_IDATA_MAX_NPPODS; + + ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, + &sg, &offset); + if (ret < 0) + break; + } + + return ret; +} + +static int cxgbit_ddp_sgl_check(struct scatterlist *sg, + unsigned int nents) +{ + unsigned int last_sgidx = nents - 1; + unsigned int i; + + for (i = 0; i < nents; i++, sg = sg_next(sg)) { + unsigned int len = sg->length + sg->offset; + + if ((sg->offset & 0x3) || (i 
&& sg->offset) || + ((i != last_sgidx) && (len != PAGE_SIZE))) { + return -EINVAL; + } + } + + return 0; +} + +static int +cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, + unsigned int xferlen) +{ + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = cdev2ppm(cdev); + struct scatterlist *sgl = ttinfo->sgl; + unsigned int sgcnt = ttinfo->nents; + unsigned int sg_offset = sgl->offset; + int ret; + + if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) { + pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", + ppm, ppm->tformat.pgsz_idx_dflt, + xferlen, ttinfo->nents); + return -EINVAL; + } + + if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0) + return -EINVAL; + + ttinfo->nr_pages = (xferlen + sgl->offset + + (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT; + + /* + * the ddp tag will be used for the ttt in the outgoing r2t pdu + */ + ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, + &ttinfo->tag, 0); + if (ret < 0) + return ret; + ttinfo->npods = ret; + + sgl->offset = 0; + ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + sgl->offset = sg_offset; + if (!ret) { + pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", + __func__, 0, xferlen, sgcnt); + goto rel_ppods; + } + + cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, + xferlen, &ttinfo->hdr); + + ret = cxgbit_ddp_set_map(ppm, csk, ttinfo); + if (ret < 0) { + __skb_queue_purge(&csk->ppodq); + dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + goto rel_ppods; + } + + return 0; + +rel_ppods: + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + return -EINVAL; +} + +void +cxgbit_get_r2t_ttt(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_r2t *r2t) +{ + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); + struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; + int ret; + + if ((!ccmd->setup_ddp) || + (!test_bit(CSK_DDP_ENABLE, &csk->com.flags))) + goto out; + + ccmd->setup_ddp = false; + + ttinfo->sgl = cmd->se_cmd.t_data_sg; + ttinfo->nents = cmd->se_cmd.t_data_nents; + + ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); + if (ret < 0) { + pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", + csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); + + ttinfo->sgl = NULL; + ttinfo->nents = 0; + } else { + ccmd->release = true; + } +out: + pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag); + r2t->targ_xfer_tag = ttinfo->tag; +} + +void cxgbit_unmap_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd) +{ + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); + + if (ccmd->release) { + if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + put_page(sg_page(&ccmd->sg)); + } else { + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = cdev2ppm(cdev); + struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; + + /* Abort the TCP conn if DDP is not complete to + * avoid any possibility of DDP after freeing + * the cmd. 
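+			 * Otherwise the adapter could still DMA into
+			 * pages that have already been returned to the
+			 * allocator.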
+ */ + if (unlikely(cmd->write_data_done != + cmd->se_cmd.data_length)) + cxgbit_abort_conn(csk); + + if (unlikely(ttinfo->sgl)) { + dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, + ttinfo->nents, DMA_FROM_DEVICE); + ttinfo->nents = 0; + ttinfo->sgl = NULL; + } + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + } + ccmd->release = false; + } +} + +int cxgbit_ddp_init(struct cxgbit_device *cdev) +{ + struct cxgb4_lld_info *lldi = &cdev->lldi; + struct net_device *ndev = cdev->lldi.ports[0]; + struct cxgbi_tag_format tformat; + int ret, i; + + if (!lldi->vr->iscsi.size) { + pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); + return -EACCES; + } + + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); + for (i = 0; i < 4; i++) + tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) + & 0xF; + cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); + + ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0], + cdev->lldi.pdev, &cdev->lldi, &tformat, + lldi->vr->iscsi.size, lldi->iscsi_llimit, + lldi->vr->iscsi.start, 2, + lldi->vr->ppod_edram.start, + lldi->vr->ppod_edram.size); + if (ret >= 0) { + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm); + + if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) && + (ppm->ppmax >= 1024)) + set_bit(CDEV_DDP_ENABLE, &cdev->flags); + ret = 0; + } + + return ret; +} diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h new file mode 100644 index 0000000000..dcaed3a1d2 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + */ + +#ifndef __CXGBIT_LRO_H__ +#define __CXGBIT_LRO_H__ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/skbuff.h> + +#define LRO_FLUSH_LEN_MAX 65535 + +struct cxgbit_lro_cb { + struct cxgbit_sock *csk; + u32 pdu_totallen; + u32 offset; + u8 pdu_idx; + bool complete; +}; + +enum cxgbit_pducb_flags { + PDUCBF_RX_HDR = (1 << 0), /* received pdu header */ + PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */ + PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */ + PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */ + PDUCBF_RX_DDP_CMP = (1 << 4), /* ddp completion */ + PDUCBF_RX_HCRC_ERR = (1 << 5), /* header digest error */ + PDUCBF_RX_DCRC_ERR = (1 << 6), /* data digest error */ +}; + +struct cxgbit_lro_pdu_cb { + u8 flags; + u8 frags; + u8 hfrag_idx; + u8 nr_dfrags; + u8 dfrag_idx; + bool complete; + u32 seq; + u32 pdulen; + u32 hlen; + u32 dlen; + u32 doffset; + u32 ddigest; + void *hdr; +}; + +#define LRO_SKB_MAX_HEADROOM \ + (sizeof(struct cxgbit_lro_cb) + \ + (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb))) + +#define LRO_SKB_MIN_HEADROOM \ + (sizeof(struct cxgbit_lro_cb) + \ + sizeof(struct cxgbit_lro_pdu_cb)) + +#define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data) +#define cxgbit_skb_lro_pdu_cb(skb, i) \ + ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \ + + (i * sizeof(struct cxgbit_lro_pdu_cb)))) + +#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ +#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */ +#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ +#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ + +#endif /*__CXGBIT_LRO_H_*/ diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c new file mode 100644 index 0000000000..2c1950df3b --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016 Chelsio Communications, Inc. 
+ */ + +#define DRV_NAME "cxgbit" +#define DRV_VERSION "1.0.0-ko" +#define pr_fmt(fmt) DRV_NAME ": " fmt + +#include "cxgbit.h" + +#ifdef CONFIG_CHELSIO_T4_DCB +#include <net/dcbevent.h> +#include "cxgb4_dcb.h" +#endif + +LIST_HEAD(cdev_list_head); +/* cdev list lock */ +DEFINE_MUTEX(cdev_list_lock); + +void _cxgbit_free_cdev(struct kref *kref) +{ + struct cxgbit_device *cdev; + + cdev = container_of(kref, struct cxgbit_device, kref); + + cxgbi_ppm_release(cdev2ppm(cdev)); + kfree(cdev); +} + +static void cxgbit_set_mdsl(struct cxgbit_device *cdev) +{ + struct cxgb4_lld_info *lldi = &cdev->lldi; + u32 mdsl; + +#define CXGBIT_T5_MAX_PDU_LEN 16224 +#define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */ + if (is_t5(lldi->adapter_type)) { + mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN, + CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN); + } else { + mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN; + mdsl = min(mdsl, 16384U); + } + + mdsl = round_down(mdsl, 4); + mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE); + mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE); + + cdev->mdsl = mdsl; +} + +static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi) +{ + struct cxgbit_device *cdev; + + if (is_t4(lldi->adapter_type)) + return ERR_PTR(-ENODEV); + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return ERR_PTR(-ENOMEM); + + kref_init(&cdev->kref); + spin_lock_init(&cdev->np_lock); + + cdev->lldi = *lldi; + + cxgbit_set_mdsl(cdev); + + if (cxgbit_ddp_init(cdev) < 0) { + kfree(cdev); + return ERR_PTR(-EINVAL); + } + + if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags)) + pr_info("cdev %s ddp init failed\n", + pci_name(lldi->pdev)); + + if (lldi->fw_vers >= 0x10d2b00) + set_bit(CDEV_ISO_ENABLE, &cdev->flags); + + spin_lock_init(&cdev->cskq.lock); + INIT_LIST_HEAD(&cdev->cskq.list); + + mutex_lock(&cdev_list_lock); + list_add_tail(&cdev->list, &cdev_list_head); + mutex_unlock(&cdev_list_lock); + + pr_info("cdev %s added for iSCSI target transport\n", + pci_name(lldi->pdev)); + + return cdev; +} + +static void cxgbit_close_conn(struct cxgbit_device *cdev) +{ + struct cxgbit_sock *csk; + struct sk_buff *skb; + bool wakeup_thread = false; + + spin_lock_bh(&cdev->cskq.lock); + list_for_each_entry(csk, &cdev->cskq.list, list) { + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + continue; + + spin_lock_bh(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + if (skb_queue_len(&csk->rxq) == 1) + wakeup_thread = true; + spin_unlock_bh(&csk->rxq.lock); + + if (wakeup_thread) { + wake_up(&csk->waitq); + wakeup_thread = false; + } + } + spin_unlock_bh(&cdev->cskq.lock); +} + +static void cxgbit_detach_cdev(struct cxgbit_device *cdev) +{ + bool free_cdev = false; + + spin_lock_bh(&cdev->cskq.lock); + if (list_empty(&cdev->cskq.list)) + free_cdev = true; + spin_unlock_bh(&cdev->cskq.lock); + + if (free_cdev) { + mutex_lock(&cdev_list_lock); + list_del(&cdev->list); + mutex_unlock(&cdev_list_lock); + + cxgbit_put_cdev(cdev); + } else { + cxgbit_close_conn(cdev); + } +} + +static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state) +{ + struct cxgbit_device *cdev = handle; + + switch (state) { + case CXGB4_STATE_UP: + set_bit(CDEV_STATE_UP, &cdev->flags); + pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev)); + break; + case CXGB4_STATE_START_RECOVERY: + clear_bit(CDEV_STATE_UP, &cdev->flags); + cxgbit_close_conn(cdev); + pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev)); + break; + case CXGB4_STATE_DOWN: + pr_info("cdev %s state 
DOWN.\n", pci_name(cdev->lldi.pdev)); + break; + case CXGB4_STATE_DETACH: + clear_bit(CDEV_STATE_UP, &cdev->flags); + pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev)); + cxgbit_detach_cdev(cdev); + break; + default: + pr_info("cdev %s unknown state %d.\n", + pci_name(cdev->lldi.pdev), state); + break; + } + return 0; +} + +static void +cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb, + u32 ddpvld) +{ + + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld); + pdu_cb->flags |= PDUCBF_RX_HCRC_ERR; + } + + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { + pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld); + pdu_cb->flags |= PDUCBF_RX_DCRC_ERR; + } + + if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) + pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld); + + if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && + (!(pdu_cb->flags & PDUCBF_RX_DATA))) { + pdu_cb->flags |= PDUCBF_RX_DATA_DDPD; + } +} + +static void +cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, + lro_cb->pdu_idx); + struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1); + + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld)); + + pdu_cb->flags |= PDUCBF_RX_STATUS; + pdu_cb->ddigest = ntohl(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); + + if (pdu_cb->flags & PDUCBF_RX_HDR) + pdu_cb->complete = true; + + lro_cb->pdu_totallen += pdu_cb->pdulen; + lro_cb->complete = true; + lro_cb->pdu_idx++; +} + +static void +cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl, + unsigned int offset) +{ + u8 skb_frag_idx = skb_shinfo(skb)->nr_frags; + u8 i; + + /* usually there's just one frag */ + __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page, + gl->frags[0].offset + offset, + gl->frags[0].size - offset); + for (i = 1; i < gl->nfrags; i++) + __skb_fill_page_desc(skb, skb_frag_idx + i, + gl->frags[i].page, + gl->frags[i].offset, + gl->frags[i].size); + + skb_shinfo(skb)->nr_frags += gl->nfrags; + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); +} + +static void +cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, + lro_cb->pdu_idx); + u32 len, offset; + + if (op == CPL_ISCSI_HDR) { + struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va; + + offset = sizeof(struct cpl_iscsi_hdr); + pdu_cb->flags |= PDUCBF_RX_HDR; + pdu_cb->seq = ntohl(cpl->seq); + len = ntohs(cpl->len); + pdu_cb->hdr = gl->va + offset; + pdu_cb->hlen = len; + pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; + + if (unlikely(gl->nfrags > 1)) + cxgbit_skcb_flags(skb) = 0; + + lro_cb->complete = false; + } else if (op == CPL_ISCSI_DATA) { + struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va; + + offset = sizeof(struct cpl_iscsi_data); + pdu_cb->flags |= PDUCBF_RX_DATA; + len = ntohs(cpl->len); + pdu_cb->dlen = len; + pdu_cb->doffset = lro_cb->offset; + pdu_cb->nr_dfrags = gl->nfrags; + pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags; + lro_cb->complete = false; + } else { + struct cpl_rx_iscsi_cmp *cpl; + + cpl = (struct cpl_rx_iscsi_cmp *)gl->va; + offset = sizeof(struct cpl_rx_iscsi_cmp); + pdu_cb->flags 
|= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS); + len = be16_to_cpu(cpl->len); + pdu_cb->hdr = gl->va + offset; + pdu_cb->hlen = len; + pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; + pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc); + pdu_cb->pdulen = ntohs(cpl->len); + + if (unlikely(gl->nfrags > 1)) + cxgbit_skcb_flags(skb) = 0; + + cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, + be32_to_cpu(cpl->ddpvld)); + + if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) { + pdu_cb->flags |= PDUCBF_RX_DDP_CMP; + pdu_cb->complete = true; + } else if (pdu_cb->flags & PDUCBF_RX_DATA) { + pdu_cb->complete = true; + } + + lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen; + lro_cb->complete = true; + lro_cb->pdu_idx++; + } + + cxgbit_copy_frags(skb, gl, offset); + + pdu_cb->frags += gl->nfrags; + lro_cb->offset += len; + skb->len += len; + skb->data_len += len; + skb->truesize += len; +} + +static struct sk_buff * +cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl, + const __be64 *rsp, struct napi_struct *napi) +{ + struct sk_buff *skb; + struct cxgbit_lro_cb *lro_cb; + + skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM); + + if (unlikely(!skb)) + return NULL; + + memset(skb->data, 0, LRO_SKB_MAX_HEADROOM); + + cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO; + + lro_cb = cxgbit_skb_lro_cb(skb); + + cxgbit_get_csk(csk); + + lro_cb->csk = csk; + + return skb; +} + +static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + bool wakeup_thread = false; + + spin_lock(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + if (skb_queue_len(&csk->rxq) == 1) + wakeup_thread = true; + spin_unlock(&csk->rxq.lock); + + if (wakeup_thread) + wake_up(&csk->waitq); +} + +static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_sock *csk = lro_cb->csk; + + csk->lro_skb = NULL; + + __skb_unlink(skb, &lro_mgr->lroq); + cxgbit_queue_lro_skb(csk, skb); + + cxgbit_put_csk(csk); + + lro_mgr->lro_pkts++; + lro_mgr->lro_session_cnt--; +} + +static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr) +{ + struct sk_buff *skb; + + while ((skb = skb_peek(&lro_mgr->lroq))) + cxgbit_lro_flush(lro_mgr, skb); +} + +static int +cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp, + const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi) +{ + struct sk_buff *skb; + struct cxgbit_lro_cb *lro_cb; + + if (!csk) { + pr_err("%s: csk NULL, op 0x%x.\n", __func__, op); + goto out; + } + + if (csk->lro_skb) + goto add_packet; + +start_lro: + if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) { + cxgbit_uld_lro_flush(lro_mgr); + goto start_lro; + } + + skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi); + if (unlikely(!skb)) + goto out; + + csk->lro_skb = skb; + + __skb_queue_tail(&lro_mgr->lroq, skb); + lro_mgr->lro_session_cnt++; + +add_packet: + skb = csk->lro_skb; + lro_cb = cxgbit_skb_lro_cb(skb); + + if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) > + MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) || + (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) { + cxgbit_lro_flush(lro_mgr, skb); + goto start_lro; + } + + if (gl) + cxgbit_lro_add_packet_gl(skb, op, gl); + else + cxgbit_lro_add_packet_rsp(skb, op, rsp); + + lro_mgr->lro_merged++; + + return 0; + +out: + return -1; +} + +static int +cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, + const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi) +{ + struct cxgbit_device *cdev = hndl; + struct 
cxgb4_lld_info *lldi = &cdev->lldi; + struct cpl_tx_data *rpl = NULL; + struct cxgbit_sock *csk = NULL; + unsigned int tid = 0; + struct sk_buff *skb; + unsigned int op = *(u8 *)rsp; + bool lro_flush = true; + + switch (op) { + case CPL_ISCSI_HDR: + case CPL_ISCSI_DATA: + case CPL_RX_ISCSI_CMP: + case CPL_RX_ISCSI_DDP: + case CPL_FW4_ACK: + lro_flush = false; + fallthrough; + case CPL_ABORT_RPL_RSS: + case CPL_PASS_ESTABLISH: + case CPL_PEER_CLOSE: + case CPL_CLOSE_CON_RPL: + case CPL_ABORT_REQ_RSS: + case CPL_SET_TCB_RPL: + case CPL_RX_DATA: + rpl = gl ? (struct cpl_tx_data *)gl->va : + (struct cpl_tx_data *)(rsp + 1); + tid = GET_TID(rpl); + csk = lookup_tid(lldi->tids, tid); + break; + default: + break; + } + + if (csk && csk->lro_skb && lro_flush) + cxgbit_lro_flush(lro_mgr, csk->lro_skb); + + if (!gl) { + unsigned int len; + + if (op == CPL_RX_ISCSI_DDP) { + if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr, + napi)) + return 0; + } + + len = 64 - sizeof(struct rsp_ctrl) - 8; + skb = napi_alloc_skb(napi, len); + if (!skb) + goto nomem; + __skb_put(skb, len); + skb_copy_to_linear_data(skb, &rsp[1], len); + } else { + if (unlikely(op != *(u8 *)gl->va)) { + pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", + gl->va, be64_to_cpu(*rsp), + get_unaligned_be64(gl->va), + gl->tot_len); + return 0; + } + + if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) || + (op == CPL_RX_ISCSI_CMP)) { + if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, + napi)) + return 0; + } + +#define RX_PULL_LEN 128 + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) + goto nomem; + } + + rpl = (struct cpl_tx_data *)skb->data; + op = rpl->ot.opcode; + cxgbit_skcb_rx_opcode(skb) = op; + + pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", + cdev, op, rpl->ot.opcode_tid, + ntohl(rpl->ot.opcode_tid), skb); + + if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) { + cxgbit_cplhandlers[op](cdev, skb); + } else { + pr_err("No handler for opcode 0x%x.\n", op); + __kfree_skb(skb); + } + return 0; +nomem: + pr_err("%s OOM bailing out.\n", __func__); + return 1; +} + +#ifdef CONFIG_CHELSIO_T4_DCB +struct cxgbit_dcb_work { + struct dcb_app_type dcb_app; + struct work_struct work; +}; + +static void +cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id, + u8 dcb_priority, u16 port_num) +{ + struct cxgbit_sock *csk; + struct sk_buff *skb; + u16 local_port; + bool wakeup_thread = false; + + spin_lock_bh(&cdev->cskq.lock); + list_for_each_entry(csk, &cdev->cskq.list, list) { + if (csk->port_id != port_id) + continue; + + if (csk->com.local_addr.ss_family == AF_INET6) { + struct sockaddr_in6 *sock_in6; + + sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr; + local_port = ntohs(sock_in6->sin6_port); + } else { + struct sockaddr_in *sock_in; + + sock_in = (struct sockaddr_in *)&csk->com.local_addr; + local_port = ntohs(sock_in->sin_port); + } + + if (local_port != port_num) + continue; + + if (csk->dcb_priority == dcb_priority) + continue; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + continue; + + spin_lock(&csk->rxq.lock); + __skb_queue_tail(&csk->rxq, skb); + if (skb_queue_len(&csk->rxq) == 1) + wakeup_thread = true; + spin_unlock(&csk->rxq.lock); + + if (wakeup_thread) { + wake_up(&csk->waitq); + wakeup_thread = false; + } + } + spin_unlock_bh(&cdev->cskq.lock); +} + +static void cxgbit_dcb_workfn(struct work_struct *work) +{ + struct cxgbit_dcb_work *dcb_work; + struct net_device *ndev; + struct cxgbit_device *cdev = NULL; + struct dcb_app_type *iscsi_app; + u8 priority, port_id 
= 0xff; + + dcb_work = container_of(work, struct cxgbit_dcb_work, work); + iscsi_app = &dcb_work->dcb_app; + + if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { + if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) && + (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)) + goto out; + + priority = iscsi_app->app.priority; + + } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) { + if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM) + goto out; + + if (!iscsi_app->app.priority) + goto out; + + priority = ffs(iscsi_app->app.priority) - 1; + } else { + goto out; + } + + pr_debug("priority for ifid %d is %u\n", + iscsi_app->ifindex, priority); + + ndev = dev_get_by_index(&init_net, iscsi_app->ifindex); + + if (!ndev) + goto out; + + mutex_lock(&cdev_list_lock); + cdev = cxgbit_find_device(ndev, &port_id); + + dev_put(ndev); + + if (!cdev) { + mutex_unlock(&cdev_list_lock); + goto out; + } + + cxgbit_update_dcb_priority(cdev, port_id, priority, + iscsi_app->app.protocol); + mutex_unlock(&cdev_list_lock); +out: + kfree(dcb_work); +} + +static int +cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct cxgbit_dcb_work *dcb_work; + struct dcb_app_type *dcb_app = data; + + dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC); + if (!dcb_work) + return NOTIFY_DONE; + + dcb_work->dcb_app = *dcb_app; + INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn); + schedule_work(&dcb_work->work); + return NOTIFY_OK; +} +#endif + +static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsit_conn *conn) +{ + return TARGET_PROT_NORMAL; +} + +static struct iscsit_transport cxgbit_transport = { + .name = DRV_NAME, + .transport_type = ISCSI_CXGBIT, + .rdma_shutdown = false, + .priv_size = sizeof(struct cxgbit_cmd), + .owner = THIS_MODULE, + .iscsit_setup_np = cxgbit_setup_np, + .iscsit_accept_np = cxgbit_accept_np, + .iscsit_free_np = cxgbit_free_np, + .iscsit_free_conn = cxgbit_free_conn, + .iscsit_get_login_rx = cxgbit_get_login_rx, + .iscsit_put_login_tx = cxgbit_put_login_tx, + .iscsit_immediate_queue = iscsit_immediate_queue, + .iscsit_response_queue = iscsit_response_queue, + .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, + .iscsit_queue_data_in = iscsit_queue_rsp, + .iscsit_queue_status = iscsit_queue_rsp, + .iscsit_xmit_pdu = cxgbit_xmit_pdu, + .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt, + .iscsit_get_rx_pdu = cxgbit_get_rx_pdu, + .iscsit_validate_params = cxgbit_validate_params, + .iscsit_unmap_cmd = cxgbit_unmap_cmd, + .iscsit_aborted_task = iscsit_aborted_task, + .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops, +}; + +static struct cxgb4_uld_info cxgbit_uld_info = { + .name = DRV_NAME, + .nrxq = MAX_ULD_QSETS, + .ntxq = MAX_ULD_QSETS, + .rxq_size = 1024, + .lro = true, + .add = cxgbit_uld_add, + .state_change = cxgbit_uld_state_change, + .lro_rx_handler = cxgbit_uld_lro_rx_handler, + .lro_flush = cxgbit_uld_lro_flush, +}; + +#ifdef CONFIG_CHELSIO_T4_DCB +static struct notifier_block cxgbit_dcbevent_nb = { + .notifier_call = cxgbit_dcbevent_notify, +}; +#endif + +static int __init cxgbit_init(void) +{ + cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info); + iscsit_register_transport(&cxgbit_transport); + +#ifdef CONFIG_CHELSIO_T4_DCB + pr_info("%s dcb enabled.\n", DRV_NAME); + register_dcbevent_notifier(&cxgbit_dcbevent_nb); +#endif + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < + sizeof(union cxgbit_skb_cb)); + return 0; +} + +static void __exit cxgbit_exit(void) +{ + struct cxgbit_device *cdev, *tmp; + +#ifdef CONFIG_CHELSIO_T4_DCB + 
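+	/* Stop DCB priority updates before tearing down the cdev list. */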
unregister_dcbevent_notifier(&cxgbit_dcbevent_nb); +#endif + mutex_lock(&cdev_list_lock); + list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) { + list_del(&cdev->list); + cxgbit_put_cdev(cdev); + } + mutex_unlock(&cdev_list_lock); + iscsit_unregister_transport(&cxgbit_transport); + cxgb4_unregister_uld(CXGB4_ULD_ISCSIT); +} + +module_init(cxgbit_init); +module_exit(cxgbit_exit); + +MODULE_DESCRIPTION("Chelsio iSCSI target offload driver"); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c new file mode 100644 index 0000000000..acfc39683c --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -0,0 +1,1654 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016 Chelsio Communications, Inc. + */ + +#include <linux/workqueue.h> +#include <linux/kthread.h> +#include <linux/sched/signal.h> + +#include <asm/unaligned.h> +#include <net/tcp.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include "cxgbit.h" + +struct sge_opaque_hdr { + void *dev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +static const u8 cxgbit_digest_len[] = {0, 4, 4, 8}; + +#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \ + sizeof(struct fw_ofld_tx_data_wr)) + +static struct sk_buff * +__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso) +{ + struct sk_buff *skb = NULL; + u8 submode = 0; + int errcode; + static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN; + + if (len) { + skb = alloc_skb_with_frags(hdr_len, len, + 0, &errcode, + GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, TX_HDR_LEN); + skb_reset_transport_header(skb); + __skb_put(skb, ISCSI_HDR_LEN); + skb->data_len = len; + skb->len += len; + submode |= (csk->submode & CXGBIT_SUBMODE_DCRC); + + } else { + u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0; + + skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, TX_HDR_LEN + iso_len); + skb_reset_transport_header(skb); + __skb_put(skb, ISCSI_HDR_LEN); + } + + submode |= (csk->submode & CXGBIT_SUBMODE_HCRC); + cxgbit_skcb_submode(skb) = submode; + cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode]; + cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR; + return skb; +} + +static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len) +{ + return __cxgbit_alloc_skb(csk, len, false); +} + +/* + * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static int cxgbit_is_ofld_imm(const struct sk_buff *skb) +{ + int length = skb->len; + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) + length += sizeof(struct fw_ofld_tx_data_wr); + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)) + length += sizeof(struct cpl_tx_data_iso); + + return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN; +} + +/* + * cxgbit_sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. 
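+ * The first entry is folded into the two-flit ulptx_sgl header; each
+ * further pair of entries takes three flits (one flit holding both
+ * 32-bit lengths, two for the 64-bit addresses), and an unpaired
+ * final entry takes two, giving (3 * n) / 2 + (n & 1) + 2 once the
+ * first entry has been subtracted from n.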
+ */ +static inline unsigned int cxgbit_sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/* + * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (cxgbit_is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + flits = skb_transport_offset(skb) / 8; + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits + cxgbit_sgl_len(cnt); +} + +#define CXGBIT_ISO_FSLICE 0x1 +#define CXGBIT_ISO_LSLICE 0x2 +static void +cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info) +{ + struct cpl_tx_data_iso *cpl; + unsigned int submode = cxgbit_skcb_submode(skb); + unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE); + unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE); + + cpl = __skb_push(skb, sizeof(*cpl)); + + cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) | + CPL_TX_DATA_ISO_FIRST_V(fslice) | + CPL_TX_DATA_ISO_LAST_V(lslice) | + CPL_TX_DATA_ISO_CPLHDRLEN_V(0) | + CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) | + CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) | + CPL_TX_DATA_ISO_IMMEDIATE_V(0) | + CPL_TX_DATA_ISO_SCSI_V(2)); + + cpl->ahs_len = 0; + cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4)); + cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4)); + cpl->len = htonl(iso_info->len); + cpl->reserved2_seglen_offset = htonl(0); + cpl->datasn_offset = htonl(0); + cpl->buffer_offset = htonl(0); + cpl->reserved3 = 0; + + __skb_pull(skb, sizeof(*cpl)); +} + +static void +cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, + u32 len, u32 credits, u32 compl) +{ + struct fw_ofld_tx_data_wr *req; + const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; + u32 submode = cxgbit_skcb_submode(skb); + u32 wr_ulp_mode = 0; + u32 hdr_size = sizeof(*req); + u32 opcode = FW_OFLD_TX_DATA_WR; + u32 immlen = 0; + u32 force = is_t5(lldi->adapter_type) ? 
TX_FORCE_V(!submode) : + T6_TX_FORCE_F; + + if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) { + opcode = FW_ISCSI_TX_DATA_WR; + immlen += sizeof(struct cpl_tx_data_iso); + hdr_size += sizeof(struct cpl_tx_data_iso); + submode |= 8; + } + + if (cxgbit_is_ofld_imm(skb)) + immlen += dlen; + + req = __skb_push(skb, hdr_size); + req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) | + FW_WR_COMPL_V(compl) | + FW_WR_IMMDLEN_V(immlen)); + req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | + FW_WR_LEN16_V(credits)); + req->plen = htonl(len); + wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) | + FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); + + req->tunnel_to_proxy = htonl(wr_ulp_mode | force | + FW_OFLD_TX_DATA_WR_SHOVE_F); +} + +static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +void cxgbit_push_tx_frames(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + + while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) { + u32 dlen = skb->len; + u32 len = skb->len; + u32 credits_needed; + u32 compl = 0; + u32 flowclen16 = 0; + u32 iso_cpl_len = 0; + + if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) + iso_cpl_len = sizeof(struct cpl_tx_data_iso); + + if (cxgbit_is_ofld_imm(skb)) + credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16); + else + credits_needed = DIV_ROUND_UP((8 * + cxgbit_calc_tx_flits_ofld(skb)) + + iso_cpl_len, 16); + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) + credits_needed += DIV_ROUND_UP( + sizeof(struct fw_ofld_tx_data_wr), 16); + /* + * Assumes the initial credits is large enough to support + * fw_flowc_wr plus largest possible first payload + */ + + if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) { + flowclen16 = cxgbit_send_tx_flowc_wr(csk); + csk->wr_cred -= flowclen16; + csk->wr_una_cred += flowclen16; + } + + if (csk->wr_cred < credits_needed) { + pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n", + csk, skb->len, skb->data_len, + credits_needed, csk->wr_cred); + break; + } + __skb_unlink(skb, &csk->txq); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); + skb->csum = (__force __wsum)(credits_needed + flowclen16); + csk->wr_cred -= credits_needed; + csk->wr_una_cred += credits_needed; + + pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", + csk, skb->len, skb->data_len, credits_needed, + csk->wr_cred, csk->wr_una_cred); + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) { + len += cxgbit_skcb_tx_extralen(skb); + + if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || + (!before(csk->write_seq, + csk->snd_una + csk->snd_win))) { + compl = 1; + csk->wr_una_cred = 0; + } + + cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed, + compl); + csk->snd_nxt += len; + + } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) || + (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { + struct cpl_close_con_req *req = + (struct cpl_close_con_req *)skb->data; + req->wr.wr_hi |= htonl(FW_WR_COMPL_F); + csk->wr_una_cred = 0; + } + + cxgbit_sock_enqueue_wr(csk, skb); + t4_set_arp_err_handler(skb, csk, + cxgbit_arp_failure_skb_discard); + + pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n", + csk, csk->tid, skb, len); + + cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); + } +} + +static void cxgbit_unlock_sock(struct cxgbit_sock *csk) +{ + struct sk_buff_head backlogq; + struct sk_buff *skb; + void (*fn)(struct cxgbit_sock *, struct sk_buff *); + + skb_queue_head_init(&backlogq); + + spin_lock_bh(&csk->lock); + while (skb_queue_len(&csk->backlogq)) { + skb_queue_splice_init(&csk->backlogq, &backlogq); 
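+		/*
+		 * Run the spliced-off entries with csk->lock dropped;
+		 * CPLs arriving meanwhile are queued on backlogq and
+		 * picked up on the next pass of the loop.
+		 */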
+ spin_unlock_bh(&csk->lock); + + while ((skb = __skb_dequeue(&backlogq))) { + fn = cxgbit_skcb_rx_backlog_fn(skb); + fn(csk, skb); + } + + spin_lock_bh(&csk->lock); + } + + csk->lock_owner = false; + spin_unlock_bh(&csk->lock); +} + +static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + int ret = 0; + + spin_lock_bh(&csk->lock); + csk->lock_owner = true; + spin_unlock_bh(&csk->lock); + + if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) || + signal_pending(current))) { + __kfree_skb(skb); + __skb_queue_purge(&csk->ppodq); + ret = -1; + goto unlock; + } + + csk->write_seq += skb->len + + cxgbit_skcb_tx_extralen(skb); + + skb_queue_splice_tail_init(&csk->ppodq, &csk->txq); + __skb_queue_tail(&csk->txq, skb); + cxgbit_push_tx_frames(csk); + +unlock: + cxgbit_unlock_sock(csk); + return ret; +} + +static int +cxgbit_map_skb(struct iscsit_cmd *cmd, struct sk_buff *skb, u32 data_offset, + u32 data_length) +{ + u32 i = 0, nr_frags = MAX_SKB_FRAGS; + u32 padding = ((-data_length) & 3); + struct scatterlist *sg; + struct page *page; + unsigned int page_off; + + if (padding) + nr_frags--; + + /* + * We know each entry in t_data_sg contains a page. + */ + sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; + page_off = (data_offset % PAGE_SIZE); + + while (data_length && (i < nr_frags)) { + u32 cur_len = min_t(u32, data_length, sg->length - page_off); + + page = sg_page(sg); + + get_page(page); + skb_fill_page_desc(skb, i, page, sg->offset + page_off, + cur_len); + skb->data_len += cur_len; + skb->len += cur_len; + skb->truesize += cur_len; + + data_length -= cur_len; + page_off = 0; + sg = sg_next(sg); + i++; + } + + if (data_length) + return -1; + + if (padding) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -1; + skb_fill_page_desc(skb, i, page, 0, padding); + skb->data_len += padding; + skb->len += padding; + skb->truesize += padding; + } + + return 0; +} + +static int +cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd, + struct iscsi_datain_req *dr) +{ + struct iscsit_conn *conn = csk->conn; + struct sk_buff *skb; + struct iscsi_datain datain; + struct cxgbit_iso_info iso_info; + u32 data_length = cmd->se_cmd.data_length; + u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength; + u32 num_pdu, plen, tx_data = 0; + bool task_sense = !!(cmd->se_cmd.se_cmd_flags & + SCF_TRANSPORT_TASK_SENSE); + bool set_statsn = false; + int ret = -1; + + while (data_length) { + num_pdu = (data_length + mrdsl - 1) / mrdsl; + if (num_pdu > csk->max_iso_npdu) + num_pdu = csk->max_iso_npdu; + + plen = num_pdu * mrdsl; + if (plen > data_length) + plen = data_length; + + skb = __cxgbit_alloc_skb(csk, 0, true); + if (unlikely(!skb)) + return -ENOMEM; + + memset(skb->data, 0, ISCSI_HDR_LEN); + cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO; + cxgbit_skcb_submode(skb) |= (csk->submode & + CXGBIT_SUBMODE_DCRC); + cxgbit_skcb_tx_extralen(skb) = (num_pdu * + cxgbit_digest_len[cxgbit_skcb_submode(skb)]) + + ((num_pdu - 1) * ISCSI_HDR_LEN); + + memset(&datain, 0, sizeof(struct iscsi_datain)); + memset(&iso_info, 0, sizeof(iso_info)); + + if (!tx_data) + iso_info.flags |= CXGBIT_ISO_FSLICE; + + if (!(data_length - plen)) { + iso_info.flags |= CXGBIT_ISO_LSLICE; + if (!task_sense) { + datain.flags = ISCSI_FLAG_DATA_STATUS; + iscsit_increment_maxcmdsn(cmd, conn->sess); + cmd->stat_sn = conn->stat_sn++; + set_statsn = true; + } + } + + iso_info.burst_len = num_pdu * mrdsl; + iso_info.mpdu = mrdsl; + iso_info.len = ISCSI_HDR_LEN + plen; + + cxgbit_cpl_tx_data_iso(skb, 
&iso_info); + + datain.offset = tx_data; + datain.data_sn = cmd->data_sn - 1; + + iscsit_build_datain_pdu(cmd, conn, &datain, + (struct iscsi_data_rsp *)skb->data, + set_statsn); + + ret = cxgbit_map_skb(cmd, skb, tx_data, plen); + if (unlikely(ret)) { + __kfree_skb(skb); + goto out; + } + + ret = cxgbit_queue_skb(csk, skb); + if (unlikely(ret)) + goto out; + + tx_data += plen; + data_length -= plen; + + cmd->read_data_done += plen; + cmd->data_sn += num_pdu; + } + + dr->dr_complete = DATAIN_COMPLETE_NORMAL; + + return 0; + +out: + return ret; +} + +static int +cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd, + const struct iscsi_datain *datain) +{ + struct sk_buff *skb; + int ret = 0; + + skb = cxgbit_alloc_skb(csk, 0); + if (unlikely(!skb)) + return -ENOMEM; + + memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN); + + if (datain->length) { + cxgbit_skcb_submode(skb) |= (csk->submode & + CXGBIT_SUBMODE_DCRC); + cxgbit_skcb_tx_extralen(skb) = + cxgbit_digest_len[cxgbit_skcb_submode(skb)]; + } + + ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length); + if (ret < 0) { + __kfree_skb(skb); + return ret; + } + + return cxgbit_queue_skb(csk, skb); +} + +static int +cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_datain_req *dr, + const struct iscsi_datain *datain) +{ + struct cxgbit_sock *csk = conn->context; + u32 data_length = cmd->se_cmd.data_length; + u32 padding = ((-data_length) & 3); + u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength; + + if ((data_length > mrdsl) && (!dr->recovery) && + (!padding) && (!datain->offset) && csk->max_iso_npdu) { + atomic_long_add(data_length - datain->length, + &conn->sess->tx_data_octets); + return cxgbit_tx_datain_iso(csk, cmd, dr); + } + + return cxgbit_tx_datain(csk, cmd, datain); +} + +static int +cxgbit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + const void *data_buf, u32 data_buf_len) +{ + struct cxgbit_sock *csk = conn->context; + struct sk_buff *skb; + u32 padding = ((-data_buf_len) & 3); + + skb = cxgbit_alloc_skb(csk, data_buf_len + padding); + if (unlikely(!skb)) + return -ENOMEM; + + memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN); + + if (data_buf_len) { + u32 pad_bytes = 0; + + skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len); + + if (padding) + skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len, + &pad_bytes, padding); + } + + cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[ + cxgbit_skcb_submode(skb)]; + + return cxgbit_queue_skb(csk, skb); +} + +int +cxgbit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_datain_req *dr, const void *buf, u32 buf_len) +{ + if (dr) + return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf); + else + return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len); +} + +int cxgbit_validate_params(struct iscsit_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct iscsi_param *param; + u32 max_xmitdsl; + + param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH, + conn->param_list); + if (!param) + return -1; + + if (kstrtou32(param->value, 0, &max_xmitdsl) < 0) + return -1; + + if (max_xmitdsl > cdev->mdsl) { + if (iscsi_change_param_sprintf( + conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl)) + return -1; + } + + return 0; +} + +static int cxgbit_set_digest(struct cxgbit_sock *csk) +{ + struct iscsit_conn *conn = csk->conn; + struct iscsi_param *param; + + param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list); + if (!param) { + 
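+		/* A negotiated param list always carries HEADERDIGEST. */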
pr_err("param not found key %s\n", HEADERDIGEST); + return -1; + } + + if (!strcmp(param->value, CRC32C)) + csk->submode |= CXGBIT_SUBMODE_HCRC; + + param = iscsi_find_param_from_key(DATADIGEST, conn->param_list); + if (!param) { + csk->submode = 0; + pr_err("param not found key %s\n", DATADIGEST); + return -1; + } + + if (!strcmp(param->value, CRC32C)) + csk->submode |= CXGBIT_SUBMODE_DCRC; + + if (cxgbit_setup_conn_digest(csk)) { + csk->submode = 0; + return -1; + } + + return 0; +} + +static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) +{ + struct iscsit_conn *conn = csk->conn; + struct iscsi_conn_ops *conn_ops = conn->conn_ops; + struct iscsi_param *param; + u32 mrdsl, mbl; + u32 max_npdu, max_iso_npdu; + u32 max_iso_payload; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(MAXBURSTLENGTH, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", MAXBURSTLENGTH); + return -1; + } + + if (kstrtou32(param->value, 0, &mbl) < 0) + return -1; + } else { + mbl = conn->sess->sess_ops->MaxBurstLength; + } + + mrdsl = conn_ops->MaxRecvDataSegmentLength; + max_npdu = mbl / mrdsl; + + max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); + + max_iso_npdu = max_iso_payload / + (ISCSI_HDR_LEN + mrdsl + + cxgbit_digest_len[csk->submode]); + + csk->max_iso_npdu = min(max_npdu, max_iso_npdu); + + if (csk->max_iso_npdu <= 1) + csk->max_iso_npdu = 0; + + return 0; +} + +/* + * cxgbit_seq_pdu_inorder() + * @csk: pointer to cxgbit socket structure + * + * This function checks whether data sequence and data + * pdu are in order. + * + * Return: returns -1 on error, 0 if data sequence and + * data pdu are in order, 1 if data sequence or data pdu + * is not in order. + */ +static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk) +{ + struct iscsit_conn *conn = csk->conn; + struct iscsi_param *param; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(DATASEQUENCEINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATASEQUENCEINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + param = iscsi_find_param_from_key(DATAPDUINORDER, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", DATAPDUINORDER); + return -1; + } + + if (strcmp(param->value, YES)) + return 1; + + } else { + if (!conn->sess->sess_ops->DataSequenceInOrder) + return 1; + if (!conn->sess->sess_ops->DataPDUInOrder) + return 1; + } + + return 0; +} + +static int cxgbit_set_params(struct iscsit_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm; + struct iscsi_conn_ops *conn_ops = conn->conn_ops; + struct iscsi_param *param; + u8 erl; + + if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) + conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; + + if (cxgbit_set_digest(csk)) + return -1; + + if (conn->login->leading_connection) { + param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, + conn->param_list); + if (!param) { + pr_err("param not found key %s\n", ERRORRECOVERYLEVEL); + return -1; + } + if (kstrtou8(param->value, 0, &erl) < 0) + return -1; + } else { + erl = conn->sess->sess_ops->ErrorRecoveryLevel; + } + + if (!erl) { + int ret; + + ret = cxgbit_seq_pdu_inorder(csk); + if (ret < 0) { + return -1; + } else if (ret > 0) { + if (is_t5(cdev->lldi.adapter_type)) + goto enable_ddp; + else + return 0; + } + + if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { + if 
(cxgbit_set_iso_npdu(csk)) + return -1; + } + +enable_ddp: + if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) { + if (cxgbit_setup_conn_pgidx(csk, + ppm->tformat.pgsz_idx_dflt)) + return -1; + set_bit(CSK_DDP_ENABLE, &csk->com.flags); + } + } + + return 0; +} + +int +cxgbit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login, + u32 length) +{ + struct cxgbit_sock *csk = conn->context; + struct sk_buff *skb; + u32 padding_buf = 0; + u8 padding = ((-length) & 3); + + skb = cxgbit_alloc_skb(csk, length + padding); + if (!skb) + return -ENOMEM; + skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN); + skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length); + + if (padding) + skb_store_bits(skb, ISCSI_HDR_LEN + length, + &padding_buf, padding); + + if (login->login_complete) { + if (cxgbit_set_params(conn)) { + kfree_skb(skb); + return -1; + } + + set_bit(CSK_LOGIN_DONE, &csk->com.flags); + } + + if (cxgbit_queue_skb(csk, skb)) + return -1; + + if ((!login->login_complete) && (!login->login_failed)) + schedule_delayed_work(&conn->login_work, 0); + + return 0; +} + +static void +cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, + unsigned int nents, u32 skip) +{ + struct skb_seq_state st; + const u8 *buf; + unsigned int consumed = 0, buf_len; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb); + + skb_prepare_seq_read(skb, pdu_cb->doffset, + pdu_cb->doffset + pdu_cb->dlen, + &st); + + while (true) { + buf_len = skb_seq_read(consumed, &buf, &st); + if (!buf_len) { + skb_abort_seq_read(&st); + break; + } + + consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, + buf_len, skip + consumed); + } +} + +static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk) +{ + struct iscsit_conn *conn = csk->conn; + struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev); + struct cxgbit_cmd *ccmd; + struct iscsit_cmd *cmd; + + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) { + pr_err("Unable to allocate iscsit_cmd + cxgbit_cmd\n"); + return NULL; + } + + ccmd = iscsit_priv_cmd(cmd); + ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask; + ccmd->setup_ddp = true; + + return cmd; +} + +static int +cxgbit_handle_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr, + u32 length) +{ + struct iscsit_conn *conn = cmd->conn; + struct cxgbit_sock *csk = conn->context; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + pr_err("ImmediateData CRC32C DataDigest error\n"); + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " Immediate Data digest failure while" + " in ERL=0.\n"); + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, + (unsigned char *)hdr); + return IMMEDIATE_DATA_CANNOT_RECOVER; + } + + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, + (unsigned char *)hdr); + return IMMEDIATE_DATA_ERL1_CRC_FAILURE; + } + + if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); + struct skb_shared_info *ssi = skb_shinfo(csk->skb); + skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx]; + + sg_init_table(&ccmd->sg, 1); + sg_set_page(&ccmd->sg, skb_frag_page(dfrag), + skb_frag_size(dfrag), skb_frag_off(dfrag)); + get_page(skb_frag_page(dfrag)); + + cmd->se_cmd.t_data_sg = &ccmd->sg; + cmd->se_cmd.t_data_nents = 1; + + ccmd->release = true; + } else { + struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; + u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); + + 
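+		/*
+		 * Zero-copy single-frag path not taken: copy the
+		 * immediate data out of the LRO skb frags into the
+		 * command's pre-allocated scatterlist.
+		 */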
cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0); + } + + cmd->write_data_done += pdu_cb->dlen; + + if (cmd->write_data_done == cmd->se_cmd.data_length) { + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + } + + return IMMEDIATE_DATA_NORMAL_OPERATION; +} + +static int +cxgbit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr, + bool dump_payload) +{ + struct iscsit_conn *conn = cmd->conn; + int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; + /* + * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. + */ + if (dump_payload) + goto after_immediate_data; + + immed_ret = cxgbit_handle_immediate_data(cmd, hdr, + cmd->first_burst_len); +after_immediate_data: + if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { + /* + * A PDU/CmdSN carrying Immediate Data passed + * DataCRC, check against ExpCmdSN/MaxCmdSN if + * Immediate Bit is not set. + */ + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, + hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); + return 0; + } else if (cmd->unsolicited_data) { + iscsit_set_unsolicited_dataout(cmd); + } + + } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { + /* + * Immediate Data failed DataCRC and ERL>=1, + * silently drop this PDU and let the initiator + * plug the CmdSN gap. + * + * FIXME: Send Unsolicited NOPIN with reserved + * TTT here to help the initiator figure out + * the missing CmdSN, although they should be + * intelligent enough to determine the missing + * CmdSN and issue a retry to plug the sequence. 
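+	 *
+	 * Dropping is safe because the CmdSN of this PDU is never
+	 * acknowledged in ExpCmdSN, so an ERL>=1 initiator detects the
+	 * gap and retransmits the command.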
+ */ + cmd->i_state = ISTATE_REMOVE; + iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); + } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ + return -1; + + return 0; +} + +static int +cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd) +{ + struct iscsit_conn *conn = csk->conn; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr; + int rc; + bool dump_payload = false; + + rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr); + if (rc < 0) + return rc; + + if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) && + (pdu_cb->nr_dfrags == 1)) + cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); + if (rc < 0) + return 0; + else if (rc > 0) + dump_payload = true; + + if (!pdu_cb->dlen) + return 0; + + return cxgbit_get_immediate_data(cmd, hdr, dump_payload); +} + +static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) +{ + struct scatterlist *sg_start; + struct iscsit_conn *conn = csk->conn; + struct iscsit_cmd *cmd = NULL; + struct cxgbit_cmd *ccmd; + struct cxgbi_task_tag_info *ttinfo; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr; + u32 data_offset = be32_to_cpu(hdr->offset); + u32 data_len = ntoh24(hdr->dlength); + int rc, sg_nents, sg_off; + bool dcrc_err = false; + + if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) { + u32 offset = be32_to_cpu(hdr->offset); + u32 ddp_data_len; + bool success = false; + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0); + if (!cmd) + return 0; + + ddp_data_len = offset - cmd->write_data_done; + atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets); + + cmd->write_data_done = offset; + cmd->next_burst_len = ddp_data_len; + cmd->data_sn = be32_to_cpu(hdr->datasn); + + rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, + cmd, data_len, &success); + if (rc < 0) + return rc; + else if (!success) + return 0; + } else { + rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + } + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + pr_err("ITT: 0x%08x, Offset: %u, Length: %u," + " DataSN: 0x%08x\n", + hdr->itt, hdr->offset, data_len, + hdr->datasn); + + dcrc_err = true; + goto check_payload; + } + + pr_debug("DataOut data_len: %u, " + "write_data_done: %u, data_length: %u\n", + data_len, cmd->write_data_done, + cmd->se_cmd.data_length); + + if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { + u32 skip = data_offset % PAGE_SIZE; + + sg_off = data_offset / PAGE_SIZE; + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; + sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE)); + + cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip); + } + + ccmd = iscsit_priv_cmd(cmd); + ttinfo = &ccmd->ttinfo; + + if (ccmd->release && ttinfo->sgl && + (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) { + struct cxgbit_device *cdev = csk->com.cdev; + struct cxgbi_ppm *ppm = cdev2ppm(cdev); + + dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, + DMA_FROM_DEVICE); + ttinfo->nents = 0; + ttinfo->sgl = NULL; + } + +check_payload: + + rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err); + if (rc < 0) + return rc; + + return 0; +} + +static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd) +{ + struct iscsit_conn *conn = csk->conn; + struct cxgbit_lro_pdu_cb *pdu_cb = 
cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr; + unsigned char *ping_data = NULL; + u32 payload_length = pdu_cb->dlen; + int ret; + + ret = iscsit_setup_nop_out(conn, cmd, hdr); + if (ret < 0) + return 0; + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " NOPOUT Ping DataCRC failure while in" + " ERL=0.\n"); + ret = -1; + goto out; + } else { + /* + * drop this PDU and let the + * initiator plug the CmdSN gap. + */ + pr_info("Dropping NOPOUT" + " Command CmdSN: 0x%08x due to" + " DataCRC error.\n", hdr->cmdsn); + ret = 0; + goto out; + } + } + + /* + * Handle NOP-OUT payload for traditional iSCSI sockets + */ + if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + ping_data = kzalloc(payload_length + 1, GFP_KERNEL); + if (!ping_data) { + pr_err("Unable to allocate memory for" + " NOPOUT ping data.\n"); + ret = -1; + goto out; + } + + skb_copy_bits(csk->skb, pdu_cb->doffset, + ping_data, payload_length); + + ping_data[payload_length] = '\0'; + /* + * Attach ping data to struct iscsit_cmd->buf_ptr. + */ + cmd->buf_ptr = ping_data; + cmd->buf_ptr_size = payload_length; + + pr_debug("Got %u bytes of NOPOUT ping" + " data.\n", payload_length); + pr_debug("Ping Data: \"%s\"\n", ping_data); + } + + return iscsit_process_nop_out(conn, cmd, hdr); +out: + if (cmd) + iscsit_free_cmd(cmd, false); + return ret; +} + +static int +cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd) +{ + struct iscsit_conn *conn = csk->conn; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr; + u32 payload_length = pdu_cb->dlen; + int rc; + unsigned char *text_in = NULL; + + rc = iscsit_setup_text_cmd(conn, cmd, hdr); + if (rc < 0) + return rc; + + if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " Text Data digest failure while in" + " ERL=0.\n"); + goto reject; + } else { + /* + * drop this PDU and let the + * initiator plug the CmdSN gap. 
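+			 * (The unacknowledged CmdSN leaves a visible gap in
+			 * ExpCmdSN that the initiator closes by retrying the
+			 * Text command.)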
+ */ + pr_info("Dropping Text" + " Command CmdSN: 0x%08x due to" + " DataCRC error.\n", hdr->cmdsn); + return 0; + } + } + + if (payload_length) { + text_in = kzalloc(payload_length, GFP_KERNEL); + if (!text_in) { + pr_err("Unable to allocate text_in of payload_length: %u\n", + payload_length); + return -ENOMEM; + } + skb_copy_bits(csk->skb, pdu_cb->doffset, + text_in, payload_length); + + text_in[payload_length - 1] = '\0'; + + cmd->text_in_ptr = text_in; + } + + return iscsit_process_text_cmd(conn, cmd, hdr); + +reject: + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + pdu_cb->hdr); +} + +static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk) +{ + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr; + struct iscsit_conn *conn = csk->conn; + struct iscsit_cmd *cmd = NULL; + u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); + int ret = -EINVAL; + + switch (opcode) { + case ISCSI_OP_SCSI_CMD: + cmd = cxgbit_allocate_cmd(csk); + if (!cmd) + goto reject; + + ret = cxgbit_handle_scsi_cmd(csk, cmd); + break; + case ISCSI_OP_SCSI_DATA_OUT: + ret = cxgbit_handle_iscsi_dataout(csk); + break; + case ISCSI_OP_NOOP_OUT: + if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + cmd = cxgbit_allocate_cmd(csk); + if (!cmd) + goto reject; + } + + ret = cxgbit_handle_nop_out(csk, cmd); + break; + case ISCSI_OP_SCSI_TMFUNC: + cmd = cxgbit_allocate_cmd(csk); + if (!cmd) + goto reject; + + ret = iscsit_handle_task_mgt_cmd(conn, cmd, + (unsigned char *)hdr); + break; + case ISCSI_OP_TEXT: + if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { + cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); + if (!cmd) + goto reject; + } else { + cmd = cxgbit_allocate_cmd(csk); + if (!cmd) + goto reject; + } + + ret = cxgbit_handle_text_cmd(csk, cmd); + break; + case ISCSI_OP_LOGOUT: + cmd = cxgbit_allocate_cmd(csk); + if (!cmd) + goto reject; + + ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); + if (ret > 0) + wait_for_completion_timeout(&conn->conn_logout_comp, + SECONDS_FOR_LOGOUT_COMP + * HZ); + break; + case ISCSI_OP_SNACK: + ret = iscsit_handle_snack(conn, (unsigned char *)hdr); + break; + default: + pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); + dump_stack(); + break; + } + + return ret; + +reject: + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, + (unsigned char *)hdr); +} + +static int cxgbit_rx_opcode(struct cxgbit_sock *csk) +{ + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsit_conn *conn = csk->conn; + struct iscsi_hdr *hdr = pdu_cb->hdr; + u8 opcode; + + if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) { + atomic_long_inc(&conn->sess->conn_digest_errors); + goto transport_err; + } + + if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) + goto transport_err; + + opcode = hdr->opcode & ISCSI_OPCODE_MASK; + + if (conn->sess->sess_ops->SessionType && + ((!(opcode & ISCSI_OP_TEXT)) || + (!(opcode & ISCSI_OP_LOGOUT)))) { + pr_err("Received illegal iSCSI Opcode: 0x%02x" + " while in Discovery Session, rejecting.\n", opcode); + iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + goto transport_err; + } + + if (cxgbit_target_rx_opcode(csk) < 0) + goto transport_err; + + return 0; + +transport_err: + return -1; +} + +static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk) +{ + struct iscsit_conn *conn = csk->conn; + struct iscsi_login *login = conn->login; + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); + struct iscsi_login_req 
*login_req; + + login_req = (struct iscsi_login_req *)login->req; + memcpy(login_req, pdu_cb->hdr, sizeof(*login_req)); + + pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," + " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", + login_req->flags, login_req->itt, login_req->cmdsn, + login_req->exp_statsn, login_req->cid, pdu_cb->dlen); + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. + */ + if (login->first_request) { + login_req = (struct iscsi_login_req *)login->req; + login->leading_connection = (!login_req->tsih) ? 1 : 0; + login->current_stage = ISCSI_LOGIN_CURRENT_STAGE( + login_req->flags); + login->version_min = login_req->min_version; + login->version_max = login_req->max_version; + memcpy(login->isid, login_req->isid, 6); + login->cmd_sn = be32_to_cpu(login_req->cmdsn); + login->init_task_tag = login_req->itt; + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); + login->cid = be16_to_cpu(login_req->cid); + login->tsih = be16_to_cpu(login_req->tsih); + } + + if (iscsi_target_check_login_request(conn, login) < 0) + return -1; + + memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); + skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen); + + return 0; +} + +static int +cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx) +{ + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx); + int ret; + + cxgbit_rx_pdu_cb(skb) = pdu_cb; + + csk->skb = skb; + + if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) { + ret = cxgbit_rx_login_pdu(csk); + set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); + } else { + ret = cxgbit_rx_opcode(csk); + } + + return ret; +} + +static void cxgbit_lro_skb_dump(struct sk_buff *skb) +{ + struct skb_shared_info *ssi = skb_shinfo(skb); + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); + u8 i; + + pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n", + skb, skb->head, skb->data, skb->len, skb->data_len, + ssi->nr_frags); + pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n", + skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen); + + for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++) + pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, " + "frags %u.\n", + skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq, + pdu_cb->ddigest, pdu_cb->frags); + for (i = 0; i < ssi->nr_frags; i++) + pr_info("skb 0x%p, frag %d, off %u, sz %u.\n", + skb, i, skb_frag_off(&ssi->frags[i]), + skb_frag_size(&ssi->frags[i])); +} + +static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) +{ + struct sk_buff *skb = csk->lro_hskb; + struct skb_shared_info *ssi = skb_shinfo(skb); + u8 i; + + memset(skb->data, 0, LRO_SKB_MIN_HEADROOM); + for (i = 0; i < ssi->nr_frags; i++) + put_page(skb_frag_page(&ssi->frags[i])); + ssi->nr_frags = 0; + skb->data_len = 0; + skb->truesize -= skb->len; + skb->len = 0; +} + +static void +cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) +{ + struct sk_buff *hskb = csk->lro_hskb; + struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx); + struct skb_shared_info *hssi = skb_shinfo(hskb); + struct skb_shared_info *ssi = skb_shinfo(skb); + unsigned int len = 0; + + if (pdu_cb->flags & PDUCBF_RX_HDR) { + u8 hfrag_idx = hssi->nr_frags; + + hpdu_cb->flags |= pdu_cb->flags; + hpdu_cb->seq = pdu_cb->seq; + hpdu_cb->hdr = pdu_cb->hdr; + hpdu_cb->hlen = 
pdu_cb->hlen; + + memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx], + sizeof(skb_frag_t)); + + get_page(skb_frag_page(&hssi->frags[hfrag_idx])); + hssi->nr_frags++; + hpdu_cb->frags++; + hpdu_cb->hfrag_idx = hfrag_idx; + + len = skb_frag_size(&hssi->frags[hfrag_idx]); + hskb->len += len; + hskb->data_len += len; + hskb->truesize += len; + } + + if (pdu_cb->flags & PDUCBF_RX_DATA) { + u8 dfrag_idx = hssi->nr_frags, i; + + hpdu_cb->flags |= pdu_cb->flags; + hpdu_cb->dfrag_idx = dfrag_idx; + + len = 0; + for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) { + memcpy(&hssi->frags[dfrag_idx], + &ssi->frags[pdu_cb->dfrag_idx + i], + sizeof(skb_frag_t)); + + get_page(skb_frag_page(&hssi->frags[dfrag_idx])); + + len += skb_frag_size(&hssi->frags[dfrag_idx]); + + hssi->nr_frags++; + hpdu_cb->frags++; + } + + hpdu_cb->dlen = pdu_cb->dlen; + hpdu_cb->doffset = hpdu_cb->hlen; + hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags; + hskb->len += len; + hskb->data_len += len; + hskb->truesize += len; + } + + if (pdu_cb->flags & PDUCBF_RX_STATUS) { + hpdu_cb->flags |= pdu_cb->flags; + + if (hpdu_cb->flags & PDUCBF_RX_DATA) + hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD; + + hpdu_cb->ddigest = pdu_cb->ddigest; + hpdu_cb->pdulen = pdu_cb->pdulen; + } +} + +static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); + u8 pdu_idx = 0, last_idx = 0; + int ret = 0; + + if (!pdu_cb->complete) { + cxgbit_lro_skb_merge(csk, skb, 0); + + if (pdu_cb->flags & PDUCBF_RX_STATUS) { + struct sk_buff *hskb = csk->lro_hskb; + + ret = cxgbit_process_iscsi_pdu(csk, hskb, 0); + + cxgbit_lro_hskb_reset(csk); + + if (ret < 0) + goto out; + } + + pdu_idx = 1; + } + + if (lro_cb->pdu_idx) + last_idx = lro_cb->pdu_idx - 1; + + for (; pdu_idx <= last_idx; pdu_idx++) { + ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx); + if (ret < 0) + goto out; + } + + if ((!lro_cb->complete) && lro_cb->pdu_idx) + cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx); + +out: + return ret; +} + +static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); + int ret = -1; + + if ((pdu_cb->flags & PDUCBF_RX_HDR) && + (pdu_cb->seq != csk->rcv_nxt)) { + pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n", + csk, csk->tid, pdu_cb->seq, csk->rcv_nxt); + cxgbit_lro_skb_dump(skb); + return ret; + } + + csk->rcv_nxt += lro_cb->pdu_totallen; + + ret = cxgbit_process_lro_skb(csk, skb); + + csk->rx_credits += lro_cb->pdu_totallen; + + if (csk->rx_credits >= (csk->rcv_win / 4)) + cxgbit_rx_data_ack(csk); + + return ret; +} + +static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + int ret; + + ret = cxgbit_process_lro_skb(csk, skb); + if (ret) + return ret; + + csk->rx_credits += lro_cb->pdu_totallen; + if (csk->rx_credits >= csk->rcv_win) { + csk->rx_credits = 0; + cxgbit_rx_data_ack(csk); + } + + return 0; +} + +static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; + int ret = -1; + + if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { + if (is_t5(lldi->adapter_type)) + ret = cxgbit_t5_rx_lro_skb(csk, skb); + else + ret = cxgbit_rx_lro_skb(csk, skb); + } + + __kfree_skb(skb); + return ret; +} + +static bool 
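+/*
+ * Splice any skbs queued by the receive path onto the caller's local
+ * list under rxq.lock; returns true if there was anything to process.
+ * Used as the wait_event_interruptible() condition in cxgbit_wait_rxq()
+ * below.
+ */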
cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq) +{ + spin_lock_bh(&csk->rxq.lock); + if (skb_queue_len(&csk->rxq)) { + skb_queue_splice_init(&csk->rxq, rxq); + spin_unlock_bh(&csk->rxq.lock); + return true; + } + spin_unlock_bh(&csk->rxq.lock); + return false; +} + +static int cxgbit_wait_rxq(struct cxgbit_sock *csk) +{ + struct sk_buff *skb; + struct sk_buff_head rxq; + + skb_queue_head_init(&rxq); + + wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq)); + + if (signal_pending(current)) + goto out; + + while ((skb = __skb_dequeue(&rxq))) { + if (cxgbit_rx_skb(csk, skb)) + goto out; + } + + return 0; +out: + __skb_queue_purge(&rxq); + return -1; +} + +int cxgbit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login) +{ + struct cxgbit_sock *csk = conn->context; + int ret = -1; + + while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) { + ret = cxgbit_wait_rxq(csk); + if (ret) { + clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); + break; + } + } + + return ret; +} + +void cxgbit_get_rx_pdu(struct iscsit_conn *conn) +{ + struct cxgbit_sock *csk = conn->context; + + while (!kthread_should_stop()) { + iscsit_thread_check_cpumask(conn, current, 0); + if (cxgbit_wait_rxq(csk)) + return; + } +} diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c new file mode 100644 index 0000000000..b516c28934 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target.c @@ -0,0 +1,4853 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains main functions related to the iSCSI Target Core Driver. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <crypto/hash.h> +#include <linux/string.h> +#include <linux/kthread.h> +#include <linux/completion.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/idr.h> +#include <linux/delay.h> +#include <linux/sched/signal.h> +#include <asm/unaligned.h> +#include <linux/inet.h> +#include <net/ipv6.h> +#include <scsi/scsi_proto.h> +#include <scsi/iscsi_proto.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include <target/target_core_backend.h> +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_parameters.h" +#include "iscsi_target_seq_pdu_list.h" +#include "iscsi_target_datain_values.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_erl2.h" +#include "iscsi_target_login.h" +#include "iscsi_target_tmr.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" +#include "iscsi_target_device.h" +#include <target/iscsi/iscsi_target_stat.h> + +#include <target/iscsi/iscsi_transport.h> + +static LIST_HEAD(g_tiqn_list); +static LIST_HEAD(g_np_list); +static DEFINE_SPINLOCK(tiqn_lock); +static DEFINE_MUTEX(np_lock); + +static struct idr tiqn_idr; +DEFINE_IDA(sess_ida); +struct mutex auth_id_lock; + +struct iscsit_global *iscsit_global; + +struct kmem_cache *lio_qr_cache; +struct kmem_cache *lio_dr_cache; +struct kmem_cache *lio_ooo_cache; +struct kmem_cache *lio_r2t_cache; + +static int iscsit_handle_immediate_data(struct iscsit_cmd *, + struct iscsi_scsi_req *, u32); + +struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf) +{ + struct iscsi_tiqn *tiqn = NULL; + + 
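+	/*
+	 * Walk the global TIQN list under tiqn_lock; a tiqn is only
+	 * returned while TIQN_STATE_ACTIVE, and the elevated
+	 * tiqn_access_count makes iscsit_del_tiqn() wait until this
+	 * login reference is dropped.
+	 */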
spin_lock(&tiqn_lock); + list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { + if (!strcmp(tiqn->tiqn, buf)) { + + spin_lock(&tiqn->tiqn_state_lock); + if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) { + tiqn->tiqn_access_count++; + spin_unlock(&tiqn->tiqn_state_lock); + spin_unlock(&tiqn_lock); + return tiqn; + } + spin_unlock(&tiqn->tiqn_state_lock); + } + } + spin_unlock(&tiqn_lock); + + return NULL; +} + +static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn) +{ + spin_lock(&tiqn->tiqn_state_lock); + if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) { + tiqn->tiqn_state = TIQN_STATE_SHUTDOWN; + spin_unlock(&tiqn->tiqn_state_lock); + return 0; + } + spin_unlock(&tiqn->tiqn_state_lock); + + return -1; +} + +void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn) +{ + spin_lock(&tiqn->tiqn_state_lock); + tiqn->tiqn_access_count--; + spin_unlock(&tiqn->tiqn_state_lock); +} + +/* + * Note that IQN formatting is expected to be done in userspace, and + * no explicit IQN format checks are done here. + */ +struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf) +{ + struct iscsi_tiqn *tiqn = NULL; + int ret; + + if (strlen(buf) >= ISCSI_IQN_LEN) { + pr_err("Target IQN exceeds %d bytes\n", + ISCSI_IQN_LEN); + return ERR_PTR(-EINVAL); + } + + tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL); + if (!tiqn) + return ERR_PTR(-ENOMEM); + + sprintf(tiqn->tiqn, "%s", buf); + INIT_LIST_HEAD(&tiqn->tiqn_list); + INIT_LIST_HEAD(&tiqn->tiqn_tpg_list); + spin_lock_init(&tiqn->tiqn_state_lock); + spin_lock_init(&tiqn->tiqn_tpg_lock); + spin_lock_init(&tiqn->sess_err_stats.lock); + spin_lock_init(&tiqn->login_stats.lock); + spin_lock_init(&tiqn->logout_stats.lock); + + tiqn->tiqn_state = TIQN_STATE_ACTIVE; + + idr_preload(GFP_KERNEL); + spin_lock(&tiqn_lock); + + ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT); + if (ret < 0) { + pr_err("idr_alloc() failed for tiqn->tiqn_index\n"); + spin_unlock(&tiqn_lock); + idr_preload_end(); + kfree(tiqn); + return ERR_PTR(ret); + } + tiqn->tiqn_index = ret; + list_add_tail(&tiqn->tiqn_list, &g_tiqn_list); + + spin_unlock(&tiqn_lock); + idr_preload_end(); + + pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn); + + return tiqn; + +} + +static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn) +{ + /* + * Wait for accesses to said struct iscsi_tiqn to end. + */ + spin_lock(&tiqn->tiqn_state_lock); + while (tiqn->tiqn_access_count != 0) { + spin_unlock(&tiqn->tiqn_state_lock); + msleep(10); + spin_lock(&tiqn->tiqn_state_lock); + } + spin_unlock(&tiqn->tiqn_state_lock); +} + +void iscsit_del_tiqn(struct iscsi_tiqn *tiqn) +{ + /* + * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN + * while holding tiqn->tiqn_state_lock. This means that all subsequent + * attempts to access this struct iscsi_tiqn will fail from both transport + * fabric and control code paths. + */ + if (iscsit_set_tiqn_shutdown(tiqn) < 0) { + pr_err("iscsit_set_tiqn_shutdown() failed\n"); + return; + } + + iscsit_wait_for_tiqn(tiqn); + + spin_lock(&tiqn_lock); + list_del(&tiqn->tiqn_list); + idr_remove(&tiqn_idr, tiqn->tiqn_index); + spin_unlock(&tiqn_lock); + + pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n", + tiqn->tiqn); + kfree(tiqn); +} + +int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) +{ + int ret; + /* + * Determine if the network portal is accepting storage traffic. 
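+	 * That is, the np login thread must be ISCSI_NP_THREAD_ACTIVE,
+	 * not in reset or shutdown.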
+ */ + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { + spin_unlock_bh(&np->np_thread_lock); + return -1; + } + spin_unlock_bh(&np->np_thread_lock); + /* + * Determine if the portal group is accepting storage traffic. + */ + spin_lock_bh(&tpg->tpg_state_lock); + if (tpg->tpg_state != TPG_STATE_ACTIVE) { + spin_unlock_bh(&tpg->tpg_state_lock); + return -1; + } + spin_unlock_bh(&tpg->tpg_state_lock); + + /* + * Here we serialize access across the TIQN+TPG Tuple. + */ + ret = down_interruptible(&tpg->np_login_sem); + if (ret != 0) + return -1; + + spin_lock_bh(&tpg->tpg_state_lock); + if (tpg->tpg_state != TPG_STATE_ACTIVE) { + spin_unlock_bh(&tpg->tpg_state_lock); + up(&tpg->np_login_sem); + return -1; + } + spin_unlock_bh(&tpg->tpg_state_lock); + + return 0; +} + +void iscsit_login_kref_put(struct kref *kref) +{ + struct iscsi_tpg_np *tpg_np = container_of(kref, + struct iscsi_tpg_np, tpg_np_kref); + + complete(&tpg_np->tpg_np_comp); +} + +int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg, + struct iscsi_tpg_np *tpg_np) +{ + struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; + + up(&tpg->np_login_sem); + + if (tpg_np) + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); + + if (tiqn) + iscsit_put_tiqn_for_login(tiqn); + + return 0; +} + +bool iscsit_check_np_match( + struct sockaddr_storage *sockaddr, + struct iscsi_np *np, + int network_transport) +{ + struct sockaddr_in *sock_in, *sock_in_e; + struct sockaddr_in6 *sock_in6, *sock_in6_e; + bool ip_match = false; + u16 port, port_e; + + if (sockaddr->ss_family == AF_INET6) { + sock_in6 = (struct sockaddr_in6 *)sockaddr; + sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; + + if (!memcmp(&sock_in6->sin6_addr.in6_u, + &sock_in6_e->sin6_addr.in6_u, + sizeof(struct in6_addr))) + ip_match = true; + + port = ntohs(sock_in6->sin6_port); + port_e = ntohs(sock_in6_e->sin6_port); + } else { + sock_in = (struct sockaddr_in *)sockaddr; + sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; + + if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr) + ip_match = true; + + port = ntohs(sock_in->sin_port); + port_e = ntohs(sock_in_e->sin_port); + } + + if (ip_match && (port_e == port) && + (np->np_network_transport == network_transport)) + return true; + + return false; +} + +static struct iscsi_np *iscsit_get_np( + struct sockaddr_storage *sockaddr, + int network_transport) +{ + struct iscsi_np *np; + bool match; + + lockdep_assert_held(&np_lock); + + list_for_each_entry(np, &g_np_list, np_list) { + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { + spin_unlock_bh(&np->np_thread_lock); + continue; + } + + match = iscsit_check_np_match(sockaddr, np, network_transport); + if (match) { + /* + * Increment the np_exports reference count now to + * prevent iscsit_del_np() below from being called + * while iscsi_tpg_add_network_portal() is called. + */ + np->np_exports++; + spin_unlock_bh(&np->np_thread_lock); + return np; + } + spin_unlock_bh(&np->np_thread_lock); + } + + return NULL; +} + +struct iscsi_np *iscsit_add_np( + struct sockaddr_storage *sockaddr, + int network_transport) +{ + struct iscsi_np *np; + int ret; + + mutex_lock(&np_lock); + + /* + * Locate the existing struct iscsi_np if already active.. 
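+	 * A match requires the same network transport and the same
+	 * sockaddr (family, address and port); see iscsit_check_np_match().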
+ */ + np = iscsit_get_np(sockaddr, network_transport); + if (np) { + mutex_unlock(&np_lock); + return np; + } + + np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) { + mutex_unlock(&np_lock); + return ERR_PTR(-ENOMEM); + } + + np->np_flags |= NPF_IP_NETWORK; + np->np_network_transport = network_transport; + spin_lock_init(&np->np_thread_lock); + init_completion(&np->np_restart_comp); + INIT_LIST_HEAD(&np->np_list); + + ret = iscsi_target_setup_login_socket(np, sockaddr); + if (ret != 0) { + kfree(np); + mutex_unlock(&np_lock); + return ERR_PTR(ret); + } + + np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np"); + if (IS_ERR(np->np_thread)) { + pr_err("Unable to create kthread: iscsi_np\n"); + ret = PTR_ERR(np->np_thread); + kfree(np); + mutex_unlock(&np_lock); + return ERR_PTR(ret); + } + /* + * Increment the np_exports reference count now to prevent + * iscsit_del_np() below from being run while a new call to + * iscsi_tpg_add_network_portal() for a matching iscsi_np is + * active. We don't need to hold np->np_thread_lock at this + * point because iscsi_np has not been added to g_np_list yet. + */ + np->np_exports = 1; + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + + list_add_tail(&np->np_list, &g_np_list); + mutex_unlock(&np_lock); + + pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n", + &np->np_sockaddr, np->np_transport->name); + + return np; +} + +int iscsit_reset_np_thread( + struct iscsi_np *np, + struct iscsi_tpg_np *tpg_np, + struct iscsi_portal_group *tpg, + bool shutdown) +{ + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { + spin_unlock_bh(&np->np_thread_lock); + return 0; + } + np->np_thread_state = ISCSI_NP_THREAD_RESET; + atomic_inc(&np->np_reset_count); + + if (np->np_thread) { + spin_unlock_bh(&np->np_thread_lock); + send_sig(SIGINT, np->np_thread, 1); + wait_for_completion(&np->np_restart_comp); + spin_lock_bh(&np->np_thread_lock); + } + spin_unlock_bh(&np->np_thread_lock); + + if (tpg_np && shutdown) { + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); + + wait_for_completion(&tpg_np->tpg_np_comp); + } + + return 0; +} + +static void iscsit_free_np(struct iscsi_np *np) +{ + if (np->np_socket) + sock_release(np->np_socket); +} + +int iscsit_del_np(struct iscsi_np *np) +{ + spin_lock_bh(&np->np_thread_lock); + np->np_exports--; + if (np->np_exports) { + np->enabled = true; + spin_unlock_bh(&np->np_thread_lock); + return 0; + } + np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN; + spin_unlock_bh(&np->np_thread_lock); + + if (np->np_thread) { + /* + * We need to send the signal to wakeup Linux/Net + * which may be sleeping in sock_accept().. 
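+	 * kthread_stop() alone could block indefinitely while the thread
+	 * sits in the accept path, so kick it with SIGINT first.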
+ */ + send_sig(SIGINT, np->np_thread, 1); + kthread_stop(np->np_thread); + np->np_thread = NULL; + } + + np->np_transport->iscsit_free_np(np); + + mutex_lock(&np_lock); + list_del(&np->np_list); + mutex_unlock(&np_lock); + + pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n", + &np->np_sockaddr, np->np_transport->name); + + iscsit_put_transport(np->np_transport); + kfree(np); + return 0; +} + +static void iscsit_get_rx_pdu(struct iscsit_conn *); + +int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd) +{ + return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); +} +EXPORT_SYMBOL(iscsit_queue_rsp); + +void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd) +{ + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + __iscsit_free_cmd(cmd, true); +} +EXPORT_SYMBOL(iscsit_aborted_task); + +static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *, + u32, u32, const void *, void *); +static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *); + +static int +iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + const void *data_buf, u32 data_buf_len) +{ + struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu; + struct kvec *iov; + u32 niov = 0, tx_size = ISCSI_HDR_LEN; + int ret; + + iov = &cmd->iov_misc[0]; + iov[niov].iov_base = cmd->pdu; + iov[niov++].iov_len = ISCSI_HDR_LEN; + + if (conn->conn_ops->HeaderDigest) { + u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; + + iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, + header_digest); + + iov[0].iov_len += ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + pr_debug("Attaching CRC32C HeaderDigest" + " to opcode 0x%x 0x%08x\n", + hdr->opcode, *header_digest); + } + + if (data_buf_len) { + u32 padding = ((-data_buf_len) & 3); + + iov[niov].iov_base = (void *)data_buf; + iov[niov++].iov_len = data_buf_len; + tx_size += data_buf_len; + + if (padding != 0) { + iov[niov].iov_base = &cmd->pad_bytes; + iov[niov++].iov_len = padding; + tx_size += padding; + pr_debug("Attaching %u additional" + " padding bytes.\n", padding); + } + + if (conn->conn_ops->DataDigest) { + iscsit_do_crypto_hash_buf(conn->conn_tx_hash, + data_buf, data_buf_len, + padding, &cmd->pad_bytes, + &cmd->data_crc); + + iov[niov].iov_base = &cmd->data_crc; + iov[niov++].iov_len = ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + pr_debug("Attached DataDigest for %u" + " bytes opcode 0x%x, CRC 0x%08x\n", + data_buf_len, hdr->opcode, cmd->data_crc); + } + } + + cmd->iov_misc_count = niov; + cmd->tx_size = tx_size; + + ret = iscsit_send_tx_data(cmd, conn, 1); + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + return 0; +} + +static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec, + u32 data_offset, u32 data_length); +static void iscsit_unmap_iovec(struct iscsit_cmd *); +static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *, + u32, u32, u32, u8 *); +static int +iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + const struct iscsi_datain *datain) +{ + struct kvec *iov; + u32 iov_count = 0, tx_size = 0; + int ret, iov_ret; + + iov = &cmd->iov_data[0]; + iov[iov_count].iov_base = cmd->pdu; + iov[iov_count++].iov_len = ISCSI_HDR_LEN; + tx_size += ISCSI_HDR_LEN; + + if (conn->conn_ops->HeaderDigest) { + u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; + + 
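+		/*
+		 * CRC32C over the 48-byte BHS, placed directly after the
+		 * header in cmd->pdu; iov[0] is widened by ISCSI_CRC_LEN so
+		 * the digest goes out with the header.
+		 */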
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, + header_digest); + + iov[0].iov_len += ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + + pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n", + *header_digest); + } + + iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count], + cmd->orig_iov_data_count - (iov_count + 2), + datain->offset, datain->length); + if (iov_ret < 0) + return -1; + + iov_count += iov_ret; + tx_size += datain->length; + + cmd->padding = ((-datain->length) & 3); + if (cmd->padding) { + iov[iov_count].iov_base = cmd->pad_bytes; + iov[iov_count++].iov_len = cmd->padding; + tx_size += cmd->padding; + + pr_debug("Attaching %u padding bytes\n", cmd->padding); + } + + if (conn->conn_ops->DataDigest) { + cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, + cmd, datain->offset, + datain->length, + cmd->padding, + cmd->pad_bytes); + + iov[iov_count].iov_base = &cmd->data_crc; + iov[iov_count++].iov_len = ISCSI_CRC_LEN; + tx_size += ISCSI_CRC_LEN; + + pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n", + datain->length + cmd->padding, cmd->data_crc); + } + + cmd->iov_data_count = iov_count; + cmd->tx_size = tx_size; + + ret = iscsit_fe_sendpage_sg(cmd, conn); + + iscsit_unmap_iovec(cmd); + + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + return 0; +} + +static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_datain_req *dr, const void *buf, + u32 buf_len) +{ + if (dr) + return iscsit_xmit_datain_pdu(conn, cmd, buf); + else + return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len); +} + +static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn) +{ + return TARGET_PROT_NORMAL; +} + +static struct iscsit_transport iscsi_target_transport = { + .name = "iSCSI/TCP", + .transport_type = ISCSI_TCP, + .rdma_shutdown = false, + .owner = NULL, + .iscsit_setup_np = iscsit_setup_np, + .iscsit_accept_np = iscsit_accept_np, + .iscsit_free_np = iscsit_free_np, + .iscsit_get_login_rx = iscsit_get_login_rx, + .iscsit_put_login_tx = iscsit_put_login_tx, + .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, + .iscsit_immediate_queue = iscsit_immediate_queue, + .iscsit_response_queue = iscsit_response_queue, + .iscsit_queue_data_in = iscsit_queue_rsp, + .iscsit_queue_status = iscsit_queue_rsp, + .iscsit_aborted_task = iscsit_aborted_task, + .iscsit_xmit_pdu = iscsit_xmit_pdu, + .iscsit_get_rx_pdu = iscsit_get_rx_pdu, + .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, +}; + +static int __init iscsi_target_init_module(void) +{ + int ret = 0, size; + + pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); + iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL); + if (!iscsit_global) + return -1; + + spin_lock_init(&iscsit_global->ts_bitmap_lock); + mutex_init(&auth_id_lock); + idr_init(&tiqn_idr); + + ret = target_register_template(&iscsi_ops); + if (ret) + goto out; + + size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long); + iscsit_global->ts_bitmap = vzalloc(size); + if (!iscsit_global->ts_bitmap) + goto configfs_out; + + if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) { + pr_err("Unable to allocate iscsit_global->allowed_cpumask\n"); + goto bitmap_out; + } + cpumask_setall(iscsit_global->allowed_cpumask); + + lio_qr_cache = kmem_cache_create("lio_qr_cache", + sizeof(struct iscsi_queue_req), + __alignof__(struct iscsi_queue_req), 0, NULL); + if (!lio_qr_cache) { + pr_err("Unable to kmem_cache_create() for" + " lio_qr_cache\n"); + 
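+		/*
+		 * The unwind labels below run in reverse order of setup,
+		 * each one releasing everything allocated before the
+		 * failing step.
+		 */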
goto cpumask_out; + } + + lio_dr_cache = kmem_cache_create("lio_dr_cache", + sizeof(struct iscsi_datain_req), + __alignof__(struct iscsi_datain_req), 0, NULL); + if (!lio_dr_cache) { + pr_err("Unable to kmem_cache_create() for" + " lio_dr_cache\n"); + goto qr_out; + } + + lio_ooo_cache = kmem_cache_create("lio_ooo_cache", + sizeof(struct iscsi_ooo_cmdsn), + __alignof__(struct iscsi_ooo_cmdsn), 0, NULL); + if (!lio_ooo_cache) { + pr_err("Unable to kmem_cache_create() for" + " lio_ooo_cache\n"); + goto dr_out; + } + + lio_r2t_cache = kmem_cache_create("lio_r2t_cache", + sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t), + 0, NULL); + if (!lio_r2t_cache) { + pr_err("Unable to kmem_cache_create() for" + " lio_r2t_cache\n"); + goto ooo_out; + } + + iscsit_register_transport(&iscsi_target_transport); + + if (iscsit_load_discovery_tpg() < 0) + goto r2t_out; + + return ret; +r2t_out: + iscsit_unregister_transport(&iscsi_target_transport); + kmem_cache_destroy(lio_r2t_cache); +ooo_out: + kmem_cache_destroy(lio_ooo_cache); +dr_out: + kmem_cache_destroy(lio_dr_cache); +qr_out: + kmem_cache_destroy(lio_qr_cache); +cpumask_out: + free_cpumask_var(iscsit_global->allowed_cpumask); +bitmap_out: + vfree(iscsit_global->ts_bitmap); +configfs_out: + /* XXX: this probably wants it to be it's own unwind step.. */ + if (iscsit_global->discovery_tpg) + iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); + target_unregister_template(&iscsi_ops); +out: + kfree(iscsit_global); + return -ENOMEM; +} + +static void __exit iscsi_target_cleanup_module(void) +{ + iscsit_release_discovery_tpg(); + iscsit_unregister_transport(&iscsi_target_transport); + kmem_cache_destroy(lio_qr_cache); + kmem_cache_destroy(lio_dr_cache); + kmem_cache_destroy(lio_ooo_cache); + kmem_cache_destroy(lio_r2t_cache); + + /* + * Shutdown discovery sessions and disable discovery TPG + */ + if (iscsit_global->discovery_tpg) + iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); + + target_unregister_template(&iscsi_ops); + + free_cpumask_var(iscsit_global->allowed_cpumask); + vfree(iscsit_global->ts_bitmap); + kfree(iscsit_global); +} + +int iscsit_add_reject( + struct iscsit_conn *conn, + u8 reason, + unsigned char *buf) +{ + struct iscsit_cmd *cmd; + + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + return -1; + + cmd->iscsi_opcode = ISCSI_OP_REJECT; + cmd->reject_reason = reason; + + cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); + if (!cmd->buf_ptr) { + pr_err("Unable to allocate memory for cmd->buf_ptr\n"); + iscsit_free_cmd(cmd, false); + return -1; + } + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + cmd->i_state = ISTATE_SEND_REJECT; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + + return -1; +} +EXPORT_SYMBOL(iscsit_add_reject); + +static int iscsit_add_reject_from_cmd( + struct iscsit_cmd *cmd, + u8 reason, + bool add_to_conn, + unsigned char *buf) +{ + struct iscsit_conn *conn; + const bool do_put = cmd->se_cmd.se_tfo != NULL; + + if (!cmd->conn) { + pr_err("cmd->conn is NULL for ITT: 0x%08x\n", + cmd->init_task_tag); + return -1; + } + conn = cmd->conn; + + cmd->iscsi_opcode = ISCSI_OP_REJECT; + cmd->reject_reason = reason; + + cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); + if (!cmd->buf_ptr) { + pr_err("Unable to allocate memory for cmd->buf_ptr\n"); + iscsit_free_cmd(cmd, false); + return -1; + } + + if (add_to_conn) { + spin_lock_bh(&conn->cmd_lock); + 
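+		/*
+		 * Track the reject on the connection's command list so that
+		 * normal connection teardown also releases it.
+		 */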
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + } + + cmd->i_state = ISTATE_SEND_REJECT; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + /* + * Perform the kref_put now if se_cmd has already been setup by + * iscsit_setup_scsi_cmd() + */ + if (do_put) { + pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); + target_put_sess_cmd(&cmd->se_cmd); + } + return -1; +} + +static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason, + unsigned char *buf) +{ + return iscsit_add_reject_from_cmd(cmd, reason, true, buf); +} + +int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf) +{ + return iscsit_add_reject_from_cmd(cmd, reason, false, buf); +} +EXPORT_SYMBOL(iscsit_reject_cmd); + +/* + * Map some portion of the allocated scatterlist to an iovec, suitable for + * kernel sockets to copy data in/out. + */ +static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec, + u32 data_offset, u32 data_length) +{ + u32 i = 0, orig_data_length = data_length; + struct scatterlist *sg; + unsigned int page_off; + + /* + * We know each entry in t_data_sg contains a page. + */ + u32 ent = data_offset / PAGE_SIZE; + + if (!data_length) + return 0; + + if (ent >= cmd->se_cmd.t_data_nents) { + pr_err("Initial page entry out-of-bounds\n"); + goto overflow; + } + + sg = &cmd->se_cmd.t_data_sg[ent]; + page_off = (data_offset % PAGE_SIZE); + + cmd->first_data_sg = sg; + cmd->first_data_sg_off = page_off; + + while (data_length) { + u32 cur_len; + + if (WARN_ON_ONCE(!sg || i >= nvec)) + goto overflow; + + cur_len = min_t(u32, data_length, sg->length - page_off); + + iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off; + iov[i].iov_len = cur_len; + + data_length -= cur_len; + page_off = 0; + sg = sg_next(sg); + i++; + } + + cmd->kmapped_nents = i; + + return i; + +overflow: + pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n", + data_offset, orig_data_length, i, nvec); + for_each_sg(cmd->se_cmd.t_data_sg, sg, + cmd->se_cmd.t_data_nents, i) { + pr_err("[%d] off %d len %d\n", + i, sg->offset, sg->length); + } + return -1; +} + +static void iscsit_unmap_iovec(struct iscsit_cmd *cmd) +{ + u32 i; + struct scatterlist *sg; + + sg = cmd->first_data_sg; + + for (i = 0; i < cmd->kmapped_nents; i++) + kunmap(sg_page(&sg[i])); +} + +static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn) +{ + LIST_HEAD(ack_list); + struct iscsit_cmd *cmd, *cmd_p; + + conn->exp_statsn = exp_statsn; + + if (conn->sess->sess_ops->RDMAExtensions) + return; + + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) { + spin_lock(&cmd->istate_lock); + if ((cmd->i_state == ISTATE_SENT_STATUS) && + iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { + cmd->i_state = ISTATE_REMOVE; + spin_unlock(&cmd->istate_lock); + list_move_tail(&cmd->i_conn_node, &ack_list); + continue; + } + spin_unlock(&cmd->istate_lock); + } + spin_unlock_bh(&conn->cmd_lock); + + list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { + list_del_init(&cmd->i_conn_node); + iscsit_free_cmd(cmd, false); + } +} + +static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd) +{ + u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE)); + + iov_count += ISCSI_IOV_DATA_BUFFER; + cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL); + if (!cmd->iov_data) + return -ENOMEM; + + cmd->orig_iov_data_count = iov_count; + return 0; +} + +int 
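+/*
+ * Validate and decode an incoming SCSI Command PDU: check the R/W/F flag
+ * combinations against the EDTL, enforce the ImmediateData,
+ * MaxXmitDataSegmentLength and FirstBurstLength limits, pull an extended
+ * CDB out of the AHS if one is present, and map the iSCSI task attribute
+ * onto a SAM task attribute before initializing the backing se_cmd.
+ */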
iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf) +{ + int data_direction, payload_length; + struct iscsi_ecdb_ahdr *ecdb_ahdr; + struct iscsi_scsi_req *hdr; + int iscsi_task_attr; + unsigned char *cdb; + int sam_task_attr; + + atomic_long_inc(&conn->sess->cmd_pdus); + + hdr = (struct iscsi_scsi_req *) buf; + payload_length = ntoh24(hdr->dlength); + + /* FIXME; Add checks for AdditionalHeaderSegment */ + + if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && + !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { + pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" + " not set. Bad iSCSI Initiator.\n"); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); + } + + if (((hdr->flags & ISCSI_FLAG_CMD_READ) || + (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { + /* + * From RFC-3720 Section 10.3.1: + * + * "Either or both of R and W MAY be 1 when either the + * Expected Data Transfer Length and/or Bidirectional Read + * Expected Data Transfer Length are 0" + * + * For this case, go ahead and clear the unnecssary bits + * to avoid any confusion with ->data_direction. + */ + hdr->flags &= ~ISCSI_FLAG_CMD_READ; + hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; + + pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" + " set when Expected Data Transfer Length is 0 for" + " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); + } + + if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && + !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { + pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" + " MUST be set if Expected Data Transfer Length is not 0." + " Bad iSCSI Initiator\n"); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); + } + + if ((hdr->flags & ISCSI_FLAG_CMD_READ) && + (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { + pr_err("Bidirectional operations not supported!\n"); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); + } + + if (hdr->opcode & ISCSI_OP_IMMEDIATE) { + pr_err("Illegally set Immediate Bit in iSCSI Initiator" + " Scsi Command PDU.\n"); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); + } + + if (payload_length && !conn->sess->sess_ops->ImmediateData) { + pr_err("ImmediateData=No but DataSegmentLength=%u," + " protocol error.\n", payload_length); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + if ((be32_to_cpu(hdr->data_length) == payload_length) && + (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { + pr_err("Expected Data Transfer Length and Length of" + " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" + " bit is not set protocol error\n"); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + if (payload_length > be32_to_cpu(hdr->data_length)) { + pr_err("DataSegmentLength: %u is greater than" + " EDTL: %u, protocol error.\n", payload_length, + hdr->data_length); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err("DataSegmentLength: %u is greater than" + " MaxXmitDataSegmentLength: %u, protocol error.\n", + payload_length, conn->conn_ops->MaxXmitDataSegmentLength); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + if (payload_length > conn->sess->sess_ops->FirstBurstLength) { + pr_err("DataSegmentLength: %u is greater than" + " FirstBurstLength: %u, protocol error.\n", + payload_length, conn->sess->sess_ops->FirstBurstLength); + return iscsit_add_reject_cmd(cmd, + 
ISCSI_REASON_BOOKMARK_INVALID, buf); + } + + cdb = hdr->cdb; + + if (hdr->hlength) { + ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1); + if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) { + pr_err("Additional Header Segment type %d not supported!\n", + ecdb_ahdr->ahstype); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_CMD_NOT_SUPPORTED, buf); + } + + cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15, + GFP_KERNEL); + if (cdb == NULL) + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE); + memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb, + be16_to_cpu(ecdb_ahdr->ahslength) - 1); + } + + data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : + (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE : + DMA_NONE; + + cmd->data_direction = data_direction; + iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; + /* + * Figure out the SAM Task Attribute for the incoming SCSI CDB + */ + if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || + (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) + sam_task_attr = TCM_SIMPLE_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) + sam_task_attr = TCM_ORDERED_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) + sam_task_attr = TCM_HEAD_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_ACA) + sam_task_attr = TCM_ACA_TAG; + else { + pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" + " TCM_SIMPLE_TAG\n", iscsi_task_attr); + sam_task_attr = TCM_SIMPLE_TAG; + } + + cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; + cmd->i_state = ISTATE_NEW_CMD; + cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); + cmd->immediate_data = (payload_length) ? 1 : 0; + cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) && + (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 
1 : 0); + if (cmd->unsolicited_data) + cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; + + conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; + if (hdr->flags & ISCSI_FLAG_CMD_READ) + cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); + else + cmd->targ_xfer_tag = 0xFFFFFFFF; + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + cmd->first_burst_len = payload_length; + + if (!conn->sess->sess_ops->RDMAExtensions && + cmd->data_direction == DMA_FROM_DEVICE) { + struct iscsi_datain_req *dr; + + dr = iscsit_allocate_datain_req(); + if (!dr) { + if (cdb != hdr->cdb) + kfree(cdb); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + + iscsit_attach_datain_req(cmd, dr); + } + + /* + * Initialize struct se_cmd descriptor from target_core_mod infrastructure + */ + __target_init_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, be32_to_cpu(hdr->data_length), + cmd->data_direction, sam_task_attr, + cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun), + conn->cmd_cnt); + + pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," + " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, + hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, + conn->cid); + + target_get_sess_cmd(&cmd->se_cmd, true); + + cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; + cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb, + GFP_KERNEL); + + if (cdb != hdr->cdb) + kfree(cdb); + + if (cmd->sense_reason) { + if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + + goto attach_cmd; + } + + cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd); + if (cmd->sense_reason) + goto attach_cmd; + + cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd); + if (cmd->sense_reason) + goto attach_cmd; + + if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) { + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + +attach_cmd: + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + /* + * Check if we need to delay processing because of ALUA + * Active/NonOptimized primary access state.. + */ + core_alua_check_nonop_delay(&cmd->se_cmd); + + return 0; +} +EXPORT_SYMBOL(iscsit_setup_scsi_cmd); + +void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd) +{ + iscsit_set_dataout_sequence_values(cmd); + + spin_lock_bh(&cmd->dataout_timeout_lock); + iscsit_start_dataout_timer(cmd, cmd->conn); + spin_unlock_bh(&cmd->dataout_timeout_lock); +} +EXPORT_SYMBOL(iscsit_set_unsolicited_dataout); + +int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_scsi_req *hdr) +{ + int cmdsn_ret = 0; + /* + * Check the CmdSN against ExpCmdSN/MaxCmdSN here if + * the Immediate Bit is not set, and no Immediate + * Data is attached. + * + * A PDU/CmdSN carrying Immediate Data can only + * be processed after the DataCRC has passed. + * If the DataCRC fails, the CmdSN MUST NOT + * be acknowledged. (See below) + */ + if (!cmd->immediate_data) { + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); + return 0; + } + } + + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + /* + * If no Immediate Data is attached, it's OK to return now. 
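+	 * For an unsolicited-data WRITE the Data-Out sequence values and
+	 * dataout timer are set up below via iscsit_set_unsolicited_dataout();
+	 * the payload itself arrives later in separate Data-Out PDUs.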
+ */ + if (!cmd->immediate_data) { + if (!cmd->sense_reason && cmd->unsolicited_data) + iscsit_set_unsolicited_dataout(cmd); + if (!cmd->sense_reason) + return 0; + + target_put_sess_cmd(&cmd->se_cmd); + return 0; + } + + /* + * Early CHECK_CONDITIONs with ImmediateData never make it to command + * execution. These exceptions are processed in CmdSN order using + * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below. + */ + if (cmd->sense_reason) + return 1; + /* + * Call directly into transport_generic_new_cmd() to perform + * the backend memory allocation. + */ + cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd); + if (cmd->sense_reason) + return 1; + + return 0; +} +EXPORT_SYMBOL(iscsit_process_scsi_cmd); + +static int +iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr, + bool dump_payload) +{ + int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; + int rc; + + /* + * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. + */ + if (dump_payload) { + u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done, + cmd->first_burst_len); + + pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n", + cmd->se_cmd.data_length, cmd->write_data_done, + cmd->first_burst_len, length); + rc = iscsit_dump_data_payload(cmd->conn, length, 1); + pr_debug("Finished dumping immediate data\n"); + if (rc < 0) + immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER; + } else { + immed_ret = iscsit_handle_immediate_data(cmd, hdr, + cmd->first_burst_len); + } + + if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { + /* + * A PDU/CmdSN carrying Immediate Data passed + * DataCRC, check against ExpCmdSN/MaxCmdSN if + * Immediate Bit is not set. + */ + cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); + + return 0; + } else if (cmd->unsolicited_data) + iscsit_set_unsolicited_dataout(cmd); + + } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { + /* + * Immediate Data failed DataCRC and ERL>=1, + * silently drop this PDU and let the initiator + * plug the CmdSN gap. + * + * FIXME: Send Unsolicited NOPIN with reserved + * TTT here to help the initiator figure out + * the missing CmdSN, although they should be + * intelligent enough to determine the missing + * CmdSN and issue a retry to plug the sequence. + */ + cmd->i_state = ISTATE_REMOVE; + iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state); + } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ + return -1; + + return 0; +} + +static int +iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; + int rc, immed_data; + bool dump_payload = false; + + rc = iscsit_setup_scsi_cmd(conn, cmd, buf); + if (rc < 0) + return 0; + /* + * Allocation iovecs needed for struct socket operations for + * traditional iSCSI block I/O. 
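+	 * One kvec per page of the expected transfer, plus
+	 * ISCSI_IOV_DATA_BUFFER slack entries for the header, digests
+	 * and padding.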
+ */ + if (iscsit_allocate_iovecs(cmd) < 0) { + return iscsit_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + immed_data = cmd->immediate_data; + + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); + if (rc < 0) + return rc; + else if (rc > 0) + dump_payload = true; + + if (!immed_data) + return 0; + + return iscsit_get_immediate_data(cmd, hdr, dump_payload); +} + +static u32 iscsit_do_crypto_hash_sg( + struct ahash_request *hash, + struct iscsit_cmd *cmd, + u32 data_offset, + u32 data_length, + u32 padding, + u8 *pad_bytes) +{ + u32 data_crc; + struct scatterlist *sg; + unsigned int page_off; + + crypto_ahash_init(hash); + + sg = cmd->first_data_sg; + page_off = cmd->first_data_sg_off; + + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + + while (data_length) { + u32 cur_len = min_t(u32, data_length, sg->length); + + ahash_request_set_crypt(hash, sg, NULL, cur_len); + crypto_ahash_update(hash); + + data_length -= cur_len; + /* iscsit_map_iovec has already checked for invalid sg pointers */ + sg = sg_next(sg); + } + + if (padding) { + struct scatterlist pad_sg; + + sg_init_one(&pad_sg, pad_bytes, padding); + ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc, + padding); + crypto_ahash_finup(hash); + } else { + ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0); + crypto_ahash_final(hash); + } + + return data_crc; +} + +static void iscsit_do_crypto_hash_buf(struct ahash_request *hash, + const void *buf, u32 payload_length, u32 padding, + const void *pad_bytes, void *data_crc) +{ + struct scatterlist sg[2]; + + sg_init_table(sg, ARRAY_SIZE(sg)); + sg_set_buf(sg, buf, payload_length); + if (padding) + sg_set_buf(sg + 1, pad_bytes, padding); + + ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); + + crypto_ahash_digest(hash); +} + +int +__iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf, + struct iscsit_cmd *cmd, u32 payload_length, + bool *success) +{ + struct iscsi_data *hdr = buf; + struct se_cmd *se_cmd; + int rc; + + /* iSCSI write */ + atomic_long_add(payload_length, &conn->sess->rx_data_octets); + + pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," + " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", + hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), + payload_length, conn->cid); + + if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { + pr_err("Command ITT: 0x%08x received DataOUT after" + " last DataOUT received, dumping payload\n", + cmd->init_task_tag); + return iscsit_dump_data_payload(conn, payload_length, 1); + } + + if (cmd->data_direction != DMA_TO_DEVICE) { + pr_err("Command ITT: 0x%08x received DataOUT for a" + " NON-WRITE command.\n", cmd->init_task_tag); + return iscsit_dump_data_payload(conn, payload_length, 1); + } + se_cmd = &cmd->se_cmd; + iscsit_mod_dataout_timer(cmd); + + if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) { + pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n", + be32_to_cpu(hdr->offset), payload_length, + cmd->se_cmd.data_length); + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf); + } + + if (cmd->unsolicited_data) { + int dump_unsolicited_data = 0; + + if (conn->sess->sess_ops->InitialR2T) { + pr_err("Received 
unexpected unsolicited data" + " while InitialR2T=Yes, protocol error.\n"); + transport_send_check_condition_and_sense(&cmd->se_cmd, + TCM_UNEXPECTED_UNSOLICITED_DATA, 0); + return -1; + } + /* + * Special case for dealing with Unsolicited DataOUT + * and Unsupported SAM WRITE Opcodes and SE resource allocation + * failures; + */ + + /* Something's amiss if we're not in WRITE_PENDING state... */ + WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING); + if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE)) + dump_unsolicited_data = 1; + + if (dump_unsolicited_data) { + /* + * Check if a delayed TASK_ABORTED status needs to + * be sent now if the ISCSI_FLAG_CMD_FINAL has been + * received with the unsolicited data out. + */ + if (hdr->flags & ISCSI_FLAG_CMD_FINAL) + iscsit_stop_dataout_timer(cmd); + + return iscsit_dump_data_payload(conn, payload_length, 1); + } + } else { + /* + * For the normal solicited data path: + * + * Check for a delayed TASK_ABORTED status and dump any + * incoming data out payload if one exists. Also, when the + * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current + * data out sequence, we decrement outstanding_r2ts. Once + * outstanding_r2ts reaches zero, go ahead and send the delayed + * TASK_ABORTED status. + */ + if (se_cmd->transport_state & CMD_T_ABORTED) { + if (hdr->flags & ISCSI_FLAG_CMD_FINAL && + --cmd->outstanding_r2ts < 1) + iscsit_stop_dataout_timer(cmd); + + return iscsit_dump_data_payload(conn, payload_length, 1); + } + } + /* + * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and + * within-command recovery checks before receiving the payload. + */ + rc = iscsit_check_pre_dataout(cmd, buf); + if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY) + return 0; + else if (rc == DATAOUT_CANNOT_RECOVER) + return -1; + *success = true; + return 0; +} +EXPORT_SYMBOL(__iscsit_check_dataout_hdr); + +int +iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf, + struct iscsit_cmd **out_cmd) +{ + struct iscsi_data *hdr = buf; + struct iscsit_cmd *cmd; + u32 payload_length = ntoh24(hdr->dlength); + int rc; + bool success = false; + + if (!payload_length) { + pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n"); + return 0; + } + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err_ratelimited("DataSegmentLength: %u is greater than" + " MaxXmitDataSegmentLength: %u\n", payload_length, + conn->conn_ops->MaxXmitDataSegmentLength); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length); + if (!cmd) + return 0; + + rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success); + + if (success) + *out_cmd = cmd; + + return rc; +} +EXPORT_SYMBOL(iscsit_check_dataout_hdr); + +static int +iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_data *hdr) +{ + struct kvec *iov; + u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0; + u32 payload_length; + int iov_ret, data_crc_failed = 0; + + payload_length = min_t(u32, cmd->se_cmd.data_length, + ntoh24(hdr->dlength)); + rx_size += payload_length; + iov = &cmd->iov_data[0]; + + iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2, + be32_to_cpu(hdr->offset), payload_length); + if (iov_ret < 0) + return -1; + + iov_count += iov_ret; + + padding = ((-payload_length) & 3); + if (padding != 0) { + iov[iov_count].iov_base = cmd->pad_bytes; + iov[iov_count++].iov_len = padding; + rx_size += padding; + pr_debug("Receiving %u 
padding bytes.\n", padding); + } + + if (conn->conn_ops->DataDigest) { + iov[iov_count].iov_base = &checksum; + iov[iov_count++].iov_len = ISCSI_CRC_LEN; + rx_size += ISCSI_CRC_LEN; + } + + WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count); + rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); + + iscsit_unmap_iovec(cmd); + + if (rx_got != rx_size) + return -1; + + if (conn->conn_ops->DataDigest) { + u32 data_crc; + + data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd, + be32_to_cpu(hdr->offset), + payload_length, padding, + cmd->pad_bytes); + + if (checksum != data_crc) { + pr_err("ITT: 0x%08x, Offset: %u, Length: %u," + " DataSN: 0x%08x, CRC32C DataDigest 0x%08x" + " does not match computed 0x%08x\n", + hdr->itt, hdr->offset, payload_length, + hdr->datasn, checksum, data_crc); + data_crc_failed = 1; + } else { + pr_debug("Got CRC32C DataDigest 0x%08x for" + " %u bytes of Data Out\n", checksum, + payload_length); + } + } + + return data_crc_failed; +} + +int +iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr, + bool data_crc_failed) +{ + struct iscsit_conn *conn = cmd->conn; + int rc, ooo_cmdsn; + /* + * Increment post receive data and CRC values or perform + * within-command recovery. + */ + rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed); + if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)) + return 0; + else if (rc == DATAOUT_SEND_R2T) { + iscsit_set_dataout_sequence_values(cmd); + conn->conn_transport->iscsit_get_dataout(conn, cmd, false); + } else if (rc == DATAOUT_SEND_TO_TRANSPORT) { + /* + * Handle extra special case for out of order + * Unsolicited Data Out. + */ + spin_lock_bh(&cmd->istate_lock); + ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + + iscsit_stop_dataout_timer(cmd); + if (ooo_cmdsn) + return 0; + target_execute_cmd(&cmd->se_cmd); + return 0; + } else /* DATAOUT_CANNOT_RECOVER */ + return -1; + + return 0; +} +EXPORT_SYMBOL(iscsit_check_dataout_payload); + +static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf) +{ + struct iscsit_cmd *cmd = NULL; + struct iscsi_data *hdr = (struct iscsi_data *)buf; + int rc; + bool data_crc_failed = false; + + rc = iscsit_check_dataout_hdr(conn, buf, &cmd); + if (rc < 0) + return 0; + else if (!cmd) + return 0; + + rc = iscsit_get_dataout(conn, cmd, hdr); + if (rc < 0) + return rc; + else if (rc > 0) + data_crc_failed = true; + + return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed); +} + +int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_nopout *hdr) +{ + u32 payload_length = ntoh24(hdr->dlength); + + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { + pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n"); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } + + if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + pr_err("NOPOUT ITT is reserved, but Immediate Bit is" + " not set, protocol error.\n"); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err("NOPOUT Ping Data 
DataSegmentLength: %u is" + " greater than MaxXmitDataSegmentLength: %u, protocol" + " error.\n", payload_length, + conn->conn_ops->MaxXmitDataSegmentLength); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } + + pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x," + " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", + hdr->itt == RESERVED_ITT ? "Response" : "Request", + hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, + payload_length); + /* + * This is not a response to a Unsolicited NopIN, which means + * it can either be a NOPOUT ping request (with a valid ITT), + * or a NOPOUT not requesting a NOPIN (with a reserved ITT). + * Either way, make sure we allocate an struct iscsit_cmd, as both + * can contain ping data. + */ + if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT; + cmd->i_state = ISTATE_SEND_NOPIN; + cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? + 1 : 0); + conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; + cmd->targ_xfer_tag = 0xFFFFFFFF; + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + cmd->data_direction = DMA_NONE; + } + + return 0; +} +EXPORT_SYMBOL(iscsit_setup_nop_out); + +int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_nopout *hdr) +{ + struct iscsit_cmd *cmd_p = NULL; + int cmdsn_ret = 0; + /* + * Initiator is expecting a NopIN ping reply.. + */ + if (hdr->itt != RESERVED_ITT) { + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + if (hdr->opcode & ISCSI_OP_IMMEDIATE) { + iscsit_add_cmd_to_response_queue(cmd, conn, + cmd->i_state); + return 0; + } + + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + return 0; + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + return 0; + } + /* + * This was a response to a unsolicited NOPIN ping. + */ + if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { + cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt)); + if (!cmd_p) + return -EINVAL; + + iscsit_stop_nopin_response_timer(conn); + + cmd_p->i_state = ISTATE_REMOVE; + iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state); + + iscsit_start_nopin_timer(conn); + return 0; + } + /* + * Otherwise, initiator is not expecting a NOPIN is response. + * Just ignore for now. 
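+ *
+ * (For reference, the ITT/TTT combinations per the iSCSI spec:
+ *
+ *	ITT valid,      TTT 0xffffffff: ping request, NOPIN reply owed
+ *	ITT 0xffffffff, TTT valid:      reply to our unsolicited NOPIN
+ *	ITT 0xffffffff, TTT 0xffffffff: no response expected -- the
+ *					case being ignored here.)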
+ */ + + if (cmd) + iscsit_free_cmd(cmd, false); + + return 0; +} +EXPORT_SYMBOL(iscsit_process_nop_out); + +static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf) +{ + unsigned char *ping_data = NULL; + struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; + struct kvec *iov = NULL; + u32 payload_length = ntoh24(hdr->dlength); + int ret; + + ret = iscsit_setup_nop_out(conn, cmd, hdr); + if (ret < 0) + return 0; + /* + * Handle NOP-OUT payload for traditional iSCSI sockets + */ + if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + u32 checksum, data_crc, padding = 0; + int niov = 0, rx_got, rx_size = payload_length; + + ping_data = kzalloc(payload_length + 1, GFP_KERNEL); + if (!ping_data) { + ret = -1; + goto out; + } + + iov = &cmd->iov_misc[0]; + iov[niov].iov_base = ping_data; + iov[niov++].iov_len = payload_length; + + padding = ((-payload_length) & 3); + if (padding != 0) { + pr_debug("Receiving %u additional bytes" + " for padding.\n", padding); + iov[niov].iov_base = &cmd->pad_bytes; + iov[niov++].iov_len = padding; + rx_size += padding; + } + if (conn->conn_ops->DataDigest) { + iov[niov].iov_base = &checksum; + iov[niov++].iov_len = ISCSI_CRC_LEN; + rx_size += ISCSI_CRC_LEN; + } + + WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc)); + rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size); + if (rx_got != rx_size) { + ret = -1; + goto out; + } + + if (conn->conn_ops->DataDigest) { + iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data, + payload_length, padding, + cmd->pad_bytes, &data_crc); + + if (checksum != data_crc) { + pr_err("Ping data CRC32C DataDigest" + " 0x%08x does not match computed 0x%08x\n", + checksum, data_crc); + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " NOPOUT Ping DataCRC failure while in" + " ERL=0.\n"); + ret = -1; + goto out; + } else { + /* + * Silently drop this PDU and let the + * initiator plug the CmdSN gap. + */ + pr_debug("Dropping NOPOUT" + " Command CmdSN: 0x%08x due to" + " DataCRC error.\n", hdr->cmdsn); + ret = 0; + goto out; + } + } else { + pr_debug("Got CRC32C DataDigest" + " 0x%08x for %u bytes of ping data.\n", + checksum, payload_length); + } + } + + ping_data[payload_length] = '\0'; + /* + * Attach ping data to struct iscsit_cmd->buf_ptr. 
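+ *
+ * (iscsit_send_nopin() further below echoes buf_ptr/buf_ptr_size back
+ * as the NOPIN ping data, which is why the payload is kept around
+ * here.)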
+ */ + cmd->buf_ptr = ping_data; + cmd->buf_ptr_size = payload_length; + + pr_debug("Got %u bytes of NOPOUT ping" + " data.\n", payload_length); + pr_debug("Ping Data: \"%s\"\n", ping_data); + } + + return iscsit_process_nop_out(conn, cmd, hdr); +out: + if (cmd) + iscsit_free_cmd(cmd, false); + + kfree(ping_data); + return ret; +} + +static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf) +{ + switch (iscsi_tmf) { + case ISCSI_TM_FUNC_ABORT_TASK: + return TMR_ABORT_TASK; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + return TMR_ABORT_TASK_SET; + case ISCSI_TM_FUNC_CLEAR_ACA: + return TMR_CLEAR_ACA; + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + return TMR_CLEAR_TASK_SET; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + return TMR_LUN_RESET; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + return TMR_TARGET_WARM_RESET; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + return TMR_TARGET_COLD_RESET; + default: + return TMR_UNKNOWN; + } +} + +int +iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct se_tmr_req *se_tmr; + struct iscsi_tmr_req *tmr_req; + struct iscsi_tm *hdr; + int out_of_order_cmdsn = 0, ret; + u8 function, tcm_function = TMR_UNKNOWN; + + hdr = (struct iscsi_tm *) buf; + hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; + function = hdr->flags; + + pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:" + " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:" + " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function, + hdr->rtt, hdr->refcmdsn, conn->cid); + + if ((function != ISCSI_TM_FUNC_ABORT_TASK) && + ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && + hdr->rtt != RESERVED_ITT)) { + pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n"); + hdr->rtt = RESERVED_ITT; + } + + if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) && + !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + pr_err("Task Management Request TASK_REASSIGN not" + " issued as immediate command, bad iSCSI Initiator" + "implementation\n"); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_PROTOCOL_ERROR, buf); + } + if ((function != ISCSI_TM_FUNC_ABORT_TASK) && + be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG) + hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG); + + cmd->data_direction = DMA_NONE; + cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL); + if (!cmd->tmr_req) { + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + buf); + } + + __target_init_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, 0, DMA_NONE, + TCM_SIMPLE_TAG, cmd->sense_buffer + 2, + scsilun_to_int(&hdr->lun), + conn->cmd_cnt); + + target_get_sess_cmd(&cmd->se_cmd, true); + + /* + * TASK_REASSIGN for ERL=2 / connection stays inside of + * LIO-Target $FABRIC_MOD + */ + if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { + tcm_function = iscsit_convert_tmf(function); + if (tcm_function == TMR_UNKNOWN) { + pr_err("Unknown iSCSI TMR Function:" + " 0x%02x\n", function); + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + } + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, + GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; + + cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; + cmd->i_state = ISTATE_SEND_TASKMGTRSP; + cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 
1 : 0); + cmd->init_task_tag = hdr->itt; + cmd->targ_xfer_tag = 0xFFFFFFFF; + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + se_tmr = cmd->se_cmd.se_tmr_req; + tmr_req = cmd->tmr_req; + /* + * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN + */ + if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { + ret = transport_lookup_tmr_lun(&cmd->se_cmd); + if (ret < 0) { + se_tmr->response = ISCSI_TMF_RSP_NO_LUN; + goto attach; + } + } + + switch (function) { + case ISCSI_TM_FUNC_ABORT_TASK: + se_tmr->response = iscsit_tmr_abort_task(cmd, buf); + if (se_tmr->response) + goto attach; + break; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + case ISCSI_TM_FUNC_CLEAR_ACA: + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + break; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { + se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; + goto attach; + } + break; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { + se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; + goto attach; + } + break; + case ISCSI_TM_FUNC_TASK_REASSIGN: + se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); + /* + * Perform sanity checks on the ExpDataSN only if the + * TASK_REASSIGN was successful. + */ + if (se_tmr->response) + break; + + if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) + return iscsit_add_reject_cmd(cmd, + ISCSI_REASON_BOOKMARK_INVALID, buf); + break; + default: + pr_err("Unknown TMR function: 0x%02x, protocol" + " error.\n", function); + se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; + goto attach; + } + + if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && + (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) + se_tmr->call_transport = 1; +attach: + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { + out_of_order_cmdsn = 1; + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); + return 0; + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { + return -1; + } + } + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) + return 0; + /* + * Found the referenced task, send to transport for processing. + */ + if (se_tmr->call_transport) + return transport_generic_handle_tmr(&cmd->se_cmd); + + /* + * Could not find the referenced LUN, task, or Task Management + * command not authorized or supported. Change state and + * let the tx_thread send the response. + * + * For connection recovery, this is also the default action for + * TMR TASK_REASSIGN. 
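+ *
+ * (The tx thread picks this up as ISTATE_SEND_TASKMGTRSP and emits
+ * the response via iscsit_send_task_mgt_rsp() below, which maps the
+ * TCM result code through iscsit_convert_tcm_tmr_rsp().)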
+ */ + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + target_put_sess_cmd(&cmd->se_cmd); + return 0; +} +EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); + +/* #warning FIXME: Support Text Command parameters besides SendTargets */ +int +iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_text *hdr) +{ + u32 payload_length = ntoh24(hdr->dlength); + + if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err("Unable to accept text parameter length: %u" + "greater than MaxXmitDataSegmentLength %u.\n", + payload_length, conn->conn_ops->MaxXmitDataSegmentLength); + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); + } + + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) || + (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) { + pr_err("Multi sequence text commands currently not supported\n"); + return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED, + (unsigned char *)hdr); + } + + pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," + " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, + hdr->exp_statsn, payload_length); + + cmd->iscsi_opcode = ISCSI_OP_TEXT; + cmd->i_state = ISTATE_SEND_TEXTRSP; + cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); + conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; + cmd->targ_xfer_tag = 0xFFFFFFFF; + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + cmd->data_direction = DMA_NONE; + kfree(cmd->text_in_ptr); + cmd->text_in_ptr = NULL; + + return 0; +} +EXPORT_SYMBOL(iscsit_setup_text_cmd); + +int +iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + struct iscsi_text *hdr) +{ + unsigned char *text_in = cmd->text_in_ptr, *text_ptr; + int cmdsn_ret; + + if (!text_in) { + cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt); + if (cmd->targ_xfer_tag == 0xFFFFFFFF) { + pr_err("Unable to locate text_in buffer for sendtargets" + " discovery\n"); + goto reject; + } + goto empty_sendtargets; + } + if (strncmp("SendTargets=", text_in, 12) != 0) { + pr_err("Received Text Data that is not" + " SendTargets, cannot continue.\n"); + goto reject; + } + /* '=' confirmed in strncmp */ + text_ptr = strchr(text_in, '='); + BUG_ON(!text_ptr); + if (!strncmp("=All", text_ptr, 5)) { + cmd->cmd_flags |= ICF_SENDTARGETS_ALL; + } else if (!strncmp("=iqn.", text_ptr, 5) || + !strncmp("=eui.", text_ptr, 5)) { + cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; + } else { + pr_err("Unable to locate valid SendTargets%s value\n", + text_ptr); + goto reject; + } + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + +empty_sendtargets: + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, + (unsigned char *)hdr, hdr->cmdsn); + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + + return 0; + } + + return iscsit_execute_cmd(cmd, 0); + +reject: + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); +} +EXPORT_SYMBOL(iscsit_process_text_cmd); + +static int +iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsi_text *hdr = (struct iscsi_text *)buf; + char *text_in = NULL; + u32 payload_length = ntoh24(hdr->dlength); + int rx_size, rc; + + rc = iscsit_setup_text_cmd(conn, cmd, hdr); + if (rc < 0) + return 0; + + rx_size = payload_length; + if (payload_length) { + 
u32 checksum = 0, data_crc = 0; + u32 padding = 0; + int niov = 0, rx_got; + struct kvec iov[2]; + + rx_size = ALIGN(payload_length, 4); + text_in = kzalloc(rx_size, GFP_KERNEL); + if (!text_in) + goto reject; + + cmd->text_in_ptr = text_in; + + memset(iov, 0, sizeof(iov)); + iov[niov].iov_base = text_in; + iov[niov++].iov_len = rx_size; + + padding = rx_size - payload_length; + if (padding) + pr_debug("Receiving %u additional bytes" + " for padding.\n", padding); + if (conn->conn_ops->DataDigest) { + iov[niov].iov_base = &checksum; + iov[niov++].iov_len = ISCSI_CRC_LEN; + rx_size += ISCSI_CRC_LEN; + } + + WARN_ON_ONCE(niov > ARRAY_SIZE(iov)); + rx_got = rx_data(conn, &iov[0], niov, rx_size); + if (rx_got != rx_size) + goto reject; + + if (conn->conn_ops->DataDigest) { + iscsit_do_crypto_hash_buf(conn->conn_rx_hash, + text_in, rx_size, 0, NULL, + &data_crc); + + if (checksum != data_crc) { + pr_err("Text data CRC32C DataDigest" + " 0x%08x does not match computed" + " 0x%08x\n", checksum, data_crc); + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " Text Data digest failure while in" + " ERL=0.\n"); + goto reject; + } else { + /* + * Silently drop this PDU and let the + * initiator plug the CmdSN gap. + */ + pr_debug("Dropping Text" + " Command CmdSN: 0x%08x due to" + " DataCRC error.\n", hdr->cmdsn); + kfree(text_in); + return 0; + } + } else { + pr_debug("Got CRC32C DataDigest" + " 0x%08x for %u bytes of text data.\n", + checksum, payload_length); + } + } + text_in[payload_length - 1] = '\0'; + pr_debug("Successfully read %d bytes of text" + " data.\n", payload_length); + } + + return iscsit_process_text_cmd(conn, cmd, hdr); + +reject: + kfree(cmd->text_in_ptr); + cmd->text_in_ptr = NULL; + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); +} + +int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsit_conn *conn_p; + struct iscsit_session *sess = conn->sess; + + pr_debug("Received logout request CLOSESESSION on CID: %hu" + " for SID: %u.\n", conn->cid, conn->sess->sid); + + atomic_set(&sess->session_logout, 1); + atomic_set(&conn->conn_logout_remove, 1); + conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; + + iscsit_inc_conn_usage_count(conn); + iscsit_inc_session_usage_count(sess); + + spin_lock_bh(&sess->conn_lock); + list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { + if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) + continue; + + pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); + conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; + } + spin_unlock_bh(&sess->conn_lock); + + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + + return 0; +} + +int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsit_conn *l_conn; + struct iscsit_session *sess = conn->sess; + + pr_debug("Received logout request CLOSECONNECTION for CID:" + " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); + + /* + * A Logout Request with a CLOSECONNECTION reason code for a CID + * can arrive on a connection with a differing CID. 
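+ *
+ * (Example: a two-connection session may send the Logout Request on
+ * CID 0 asking to close CID 1. The response then goes out on CID 0,
+ * while CID 1 itself is torn down separately in
+ * iscsit_logout_post_handler_diffcid(), as noted below.)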
+ */ + if (conn->cid == cmd->logout_cid) { + spin_lock_bh(&conn->state_lock); + pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); + conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; + + atomic_set(&conn->conn_logout_remove, 1); + conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; + iscsit_inc_conn_usage_count(conn); + + spin_unlock_bh(&conn->state_lock); + } else { + /* + * Handle all different cid CLOSECONNECTION requests in + * iscsit_logout_post_handler_diffcid() as to give enough + * time for any non immediate command's CmdSN to be + * acknowledged on the connection in question. + * + * Here we simply make sure the CID is still around. + */ + l_conn = iscsit_get_conn_from_cid(sess, + cmd->logout_cid); + if (!l_conn) { + cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; + iscsit_add_cmd_to_response_queue(cmd, conn, + cmd->i_state); + return 0; + } + + iscsit_dec_conn_usage_count(l_conn); + } + + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + + return 0; +} + +int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + + pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" + " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); + + if (sess->sess_ops->ErrorRecoveryLevel != 2) { + pr_err("Received Logout Request REMOVECONNFORRECOVERY" + " while ERL!=2.\n"); + cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + return 0; + } + + if (conn->cid == cmd->logout_cid) { + pr_err("Received Logout Request REMOVECONNFORRECOVERY" + " with CID: %hu on CID: %hu, implementation error.\n", + cmd->logout_cid, conn->cid); + cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + return 0; + } + + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + + return 0; +} + +int +iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf) +{ + int cmdsn_ret, logout_remove = 0; + u8 reason_code = 0; + struct iscsi_logout *hdr; + struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); + + hdr = (struct iscsi_logout *) buf; + reason_code = (hdr->flags & 0x7f); + + if (tiqn) { + spin_lock(&tiqn->logout_stats.lock); + if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) + tiqn->logout_stats.normal_logouts++; + else + tiqn->logout_stats.abnormal_logouts++; + spin_unlock(&tiqn->logout_stats.lock); + } + + pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" + " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", + hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, + hdr->cid, conn->cid); + + if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { + pr_err("Received logout request on connection that" + " is not in logged in state, ignoring request.\n"); + iscsit_free_cmd(cmd, false); + return 0; + } + + cmd->iscsi_opcode = ISCSI_OP_LOGOUT; + cmd->i_state = ISTATE_SEND_LOGOUTRSP; + cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); + conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; + cmd->targ_xfer_tag = 0xFFFFFFFF; + cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); + cmd->logout_cid = be16_to_cpu(hdr->cid); + cmd->logout_reason = reason_code; + cmd->data_direction = DMA_NONE; + + /* + * We need to sleep in these cases (by returning 1) until the Logout + * Response gets sent in the tx thread. 
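+ *
+ * (Assumption, based on the rx-side dispatcher elsewhere in this
+ * file: a return of 1 makes the rx thread block on
+ * conn->conn_logout_comp, so no further PDUs are read until the tx
+ * thread has pushed the Logout Response out.)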
+ */ + if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || + ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && + be16_to_cpu(hdr->cid) == conn->cid)) + logout_remove = 1; + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) + iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); + + /* + * Immediate commands are executed, well, immediately. + * Non-Immediate Logout Commands are executed in CmdSN order. + */ + if (cmd->immediate_cmd) { + int ret = iscsit_execute_cmd(cmd, 0); + + if (ret < 0) + return ret; + } else { + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + logout_remove = 0; + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + return -1; + } + + return logout_remove; +} +EXPORT_SYMBOL(iscsit_handle_logout_cmd); + +int iscsit_handle_snack( + struct iscsit_conn *conn, + unsigned char *buf) +{ + struct iscsi_snack *hdr; + + hdr = (struct iscsi_snack *) buf; + hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; + + pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" + " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," + " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags, + hdr->begrun, hdr->runlength, conn->cid); + + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Initiator sent SNACK request while in" + " ErrorRecoveryLevel=0.\n"); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); + } + /* + * SNACK_DATA and SNACK_R2T are both 0, so check which function to + * call from inside iscsi_send_recovery_datain_or_r2t(). + */ + switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { + case 0: + return iscsit_handle_recovery_datain_or_r2t(conn, buf, + hdr->itt, + be32_to_cpu(hdr->ttt), + be32_to_cpu(hdr->begrun), + be32_to_cpu(hdr->runlength)); + case ISCSI_FLAG_SNACK_TYPE_STATUS: + return iscsit_handle_status_snack(conn, hdr->itt, + be32_to_cpu(hdr->ttt), + be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength)); + case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: + return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt), + be32_to_cpu(hdr->begrun), + be32_to_cpu(hdr->runlength)); + case ISCSI_FLAG_SNACK_TYPE_RDATA: + /* FIXME: Support R-Data SNACK */ + pr_err("R-Data SNACK Not Supported.\n"); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); + default: + pr_err("Unknown SNACK type 0x%02x, protocol" + " error.\n", hdr->flags & 0x0f); + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buf); + } + + return 0; +} +EXPORT_SYMBOL(iscsit_handle_snack); + +static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn) +{ + if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || + (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { + wait_for_completion_interruptible_timeout( + &conn->rx_half_close_comp, + ISCSI_RX_THREAD_TCP_TIMEOUT * HZ); + } +} + +static int iscsit_handle_immediate_data( + struct iscsit_cmd *cmd, + struct iscsi_scsi_req *hdr, + u32 length) +{ + int iov_ret, rx_got = 0, rx_size = 0; + u32 checksum, iov_count = 0, padding = 0; + struct iscsit_conn *conn = cmd->conn; + struct kvec *iov; + void *overflow_buf = NULL; + + BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length); + rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length); + iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, + cmd->orig_iov_data_count - 2, + cmd->write_data_done, rx_size); + if (iov_ret < 0) + return IMMEDIATE_DATA_CANNOT_RECOVER; + + iov_count = 
iov_ret; + iov = &cmd->iov_data[0]; + if (rx_size < length) { + /* + * Special case: length of immediate data exceeds the data + * buffer size derived from the CDB. + */ + overflow_buf = kmalloc(length - rx_size, GFP_KERNEL); + if (!overflow_buf) { + iscsit_unmap_iovec(cmd); + return IMMEDIATE_DATA_CANNOT_RECOVER; + } + cmd->overflow_buf = overflow_buf; + iov[iov_count].iov_base = overflow_buf; + iov[iov_count].iov_len = length - rx_size; + iov_count++; + rx_size = length; + } + + padding = ((-length) & 3); + if (padding != 0) { + iov[iov_count].iov_base = cmd->pad_bytes; + iov[iov_count++].iov_len = padding; + rx_size += padding; + } + + if (conn->conn_ops->DataDigest) { + iov[iov_count].iov_base = &checksum; + iov[iov_count++].iov_len = ISCSI_CRC_LEN; + rx_size += ISCSI_CRC_LEN; + } + + WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count); + rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); + + iscsit_unmap_iovec(cmd); + + if (rx_got != rx_size) { + iscsit_rx_thread_wait_for_tcp(conn); + return IMMEDIATE_DATA_CANNOT_RECOVER; + } + + if (conn->conn_ops->DataDigest) { + u32 data_crc; + + data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd, + cmd->write_data_done, length, padding, + cmd->pad_bytes); + + if (checksum != data_crc) { + pr_err("ImmediateData CRC32C DataDigest 0x%08x" + " does not match computed 0x%08x\n", checksum, + data_crc); + + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from" + " Immediate Data digest failure while" + " in ERL=0.\n"); + iscsit_reject_cmd(cmd, + ISCSI_REASON_DATA_DIGEST_ERROR, + (unsigned char *)hdr); + return IMMEDIATE_DATA_CANNOT_RECOVER; + } else { + iscsit_reject_cmd(cmd, + ISCSI_REASON_DATA_DIGEST_ERROR, + (unsigned char *)hdr); + return IMMEDIATE_DATA_ERL1_CRC_FAILURE; + } + } else { + pr_debug("Got CRC32C DataDigest 0x%08x for" + " %u bytes of Immediate Data\n", checksum, + length); + } + } + + cmd->write_data_done += length; + + if (cmd->write_data_done == cmd->se_cmd.data_length) { + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + } + + return IMMEDIATE_DATA_NORMAL_OPERATION; +} + +/* #warning iscsi_build_conn_drop_async_message() only sends out on connections + with active network interface */ +static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn) +{ + struct iscsit_cmd *cmd; + struct iscsit_conn *conn_p; + bool found = false; + + lockdep_assert_held(&conn->sess->conn_lock); + + /* + * Only send a Asynchronous Message on connections whos network + * interface is still functional. 
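+ *
+ * (The PDU itself is built by iscsit_send_conn_drop_async_message()
+ * below: ISCSI_ASYNC_MSG_DROPPING_CONNECTION with param1 = the
+ * dropped CID and param2/param3 = DefaultTime2Wait/DefaultTime2Retain,
+ * i.e. how long the initiator must wait before attempting, and how
+ * long the target retains state for, connection recovery.)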
+ */ + list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { + if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { + iscsit_inc_conn_usage_count(conn_p); + found = true; + break; + } + } + + if (!found) + return; + + cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); + if (!cmd) { + iscsit_dec_conn_usage_count(conn_p); + return; + } + + cmd->logout_cid = conn->cid; + cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; + cmd->i_state = ISTATE_SEND_ASYNCMSG; + + spin_lock_bh(&conn_p->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list); + spin_unlock_bh(&conn_p->cmd_lock); + + iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); + iscsit_dec_conn_usage_count(conn_p); +} + +static int iscsit_send_conn_drop_async_message( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsi_async *hdr; + + cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; + + hdr = (struct iscsi_async *) cmd->pdu; + hdr->opcode = ISCSI_OP_ASYNC_EVENT; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + cmd->init_task_tag = RESERVED_ITT; + cmd->targ_xfer_tag = 0xFFFFFFFF; + put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); + cmd->stat_sn = conn->stat_sn++; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; + hdr->param1 = cpu_to_be16(cmd->logout_cid); + hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); + hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); + + pr_debug("Sending Connection Dropped Async Message StatSN:" + " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, + cmd->logout_cid, conn->cid); + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); +} + +static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn) +{ + if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || + (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { + wait_for_completion_interruptible_timeout( + &conn->tx_half_close_comp, + ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); + } +} + +void +iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, + bool set_statsn) +{ + hdr->opcode = ISCSI_OP_SCSI_DATA_IN; + hdr->flags = datain->flags; + if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { + if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { + hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); + } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { + hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); + } + } + hton24(hdr->dlength, datain->length); + if (hdr->flags & ISCSI_FLAG_DATA_ACK) + int_to_scsilun(cmd->se_cmd.orig_fe_lun, + (struct scsi_lun *)&hdr->lun); + else + put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); + + hdr->itt = cmd->init_task_tag; + + if (hdr->flags & ISCSI_FLAG_DATA_ACK) + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); + else + hdr->ttt = cpu_to_be32(0xFFFFFFFF); + if (set_statsn) + hdr->statsn = cpu_to_be32(cmd->stat_sn); + else + hdr->statsn = cpu_to_be32(0xFFFFFFFF); + + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + hdr->datasn = cpu_to_be32(datain->data_sn); + hdr->offset = cpu_to_be32(datain->offset); + + pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," + " DataSN: 0x%08x, Offset: %u, Length: 
%u, CID: %hu\n", + cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), + ntohl(hdr->offset), datain->length, conn->cid); +} +EXPORT_SYMBOL(iscsit_build_datain_pdu); + +static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; + struct iscsi_datain datain; + struct iscsi_datain_req *dr; + int eodr = 0, ret; + bool set_statsn = false; + + memset(&datain, 0, sizeof(struct iscsi_datain)); + dr = iscsit_get_datain_values(cmd, &datain); + if (!dr) { + pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n", + cmd->init_task_tag); + return -1; + } + /* + * Be paranoid and double check the logic for now. + */ + if ((datain.offset + datain.length) > cmd->se_cmd.data_length) { + pr_err("Command ITT: 0x%08x, datain.offset: %u and" + " datain.length: %u exceeds cmd->data_length: %u\n", + cmd->init_task_tag, datain.offset, datain.length, + cmd->se_cmd.data_length); + return -1; + } + + atomic_long_add(datain.length, &conn->sess->tx_data_octets); + /* + * Special case for successfully execution w/ both DATAIN + * and Sense Data. + */ + if ((datain.flags & ISCSI_FLAG_DATA_STATUS) && + (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) + datain.flags &= ~ISCSI_FLAG_DATA_STATUS; + else { + if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) || + (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { + iscsit_increment_maxcmdsn(cmd, conn->sess); + cmd->stat_sn = conn->stat_sn++; + set_statsn = true; + } else if (dr->dr_complete == + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) + set_statsn = true; + } + + iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); + + ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0); + if (ret < 0) + return ret; + + if (dr->dr_complete) { + eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? + 2 : 1; + iscsit_free_datain_req(cmd, dr); + } + + return eodr; +} + +int +iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + struct iscsi_logout_rsp *hdr) +{ + struct iscsit_conn *logout_conn = NULL; + struct iscsi_conn_recovery *cr = NULL; + struct iscsit_session *sess = conn->sess; + /* + * The actual shutting down of Sessions and/or Connections + * for CLOSESESSION and CLOSECONNECTION Logout Requests + * is done in scsi_logout_post_handler(). + */ + switch (cmd->logout_reason) { + case ISCSI_LOGOUT_REASON_CLOSE_SESSION: + pr_debug("iSCSI session logout successful, setting" + " logout response to ISCSI_LOGOUT_SUCCESS.\n"); + cmd->logout_response = ISCSI_LOGOUT_SUCCESS; + break; + case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: + if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND) + break; + /* + * For CLOSECONNECTION logout requests carrying + * a matching logout CID -> local CID, the reference + * for the local CID will have been incremented in + * iscsi_logout_closeconnection(). + * + * For CLOSECONNECTION logout requests carrying + * a different CID than the connection it arrived + * on, the connection responding to cmd->logout_cid + * is stopped in iscsit_logout_post_handler_diffcid(). + */ + + pr_debug("iSCSI CID: %hu logout on CID: %hu" + " successful.\n", cmd->logout_cid, conn->cid); + cmd->logout_response = ISCSI_LOGOUT_SUCCESS; + break; + case ISCSI_LOGOUT_REASON_RECOVERY: + if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) || + (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED)) + break; + /* + * If the connection is still active from our point of view + * force connection recovery to occur. 
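+ *
+ * (REMOVECONNFORRECOVERY is only accepted at ErrorRecoveryLevel=2;
+ * iscsit_logout_removeconnforrecovery() has already rejected it
+ * otherwise. So cmd->logout_cid names either a still-active
+ * connection, which must be reinstated first, or an inactive
+ * connection recovery entry looked up just below.)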
+ */ + logout_conn = iscsit_get_conn_from_cid_rcfr(sess, + cmd->logout_cid); + if (logout_conn) { + iscsit_connection_reinstatement_rcfr(logout_conn); + iscsit_dec_conn_usage_count(logout_conn); + } + + cr = iscsit_get_inactive_connection_recovery_entry( + conn->sess, cmd->logout_cid); + if (!cr) { + pr_err("Unable to locate CID: %hu for" + " REMOVECONNFORRECOVERY Logout Request.\n", + cmd->logout_cid); + cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; + break; + } + + iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn); + + pr_debug("iSCSI REMOVECONNFORRECOVERY logout" + " for recovery for CID: %hu on CID: %hu successful.\n", + cmd->logout_cid, conn->cid); + cmd->logout_response = ISCSI_LOGOUT_SUCCESS; + break; + default: + pr_err("Unknown cmd->logout_reason: 0x%02x\n", + cmd->logout_reason); + return -1; + } + + hdr->opcode = ISCSI_OP_LOGOUT_RSP; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hdr->response = cmd->logout_response; + hdr->itt = cmd->init_task_tag; + cmd->stat_sn = conn->stat_sn++; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + + iscsit_increment_maxcmdsn(cmd, conn->sess); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + + pr_debug("Built Logout Response ITT: 0x%08x StatSN:" + " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", + cmd->init_task_tag, cmd->stat_sn, hdr->response, + cmd->logout_cid, conn->cid); + + return 0; +} +EXPORT_SYMBOL(iscsit_build_logout_rsp); + +static int +iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + int rc; + + rc = iscsit_build_logout_rsp(cmd, conn, + (struct iscsi_logout_rsp *)&cmd->pdu[0]); + if (rc < 0) + return rc; + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); +} + +void +iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + struct iscsi_nopin *hdr, bool nopout_response) +{ + hdr->opcode = ISCSI_OP_NOOP_IN; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hton24(hdr->dlength, cmd->buf_ptr_size); + if (nopout_response) + put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); + hdr->itt = cmd->init_task_tag; + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); + cmd->stat_sn = (nopout_response) ? conn->stat_sn++ : + conn->stat_sn; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + + if (nopout_response) + iscsit_increment_maxcmdsn(cmd, conn->sess); + + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + + pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," + " StatSN: 0x%08x, Length %u\n", (nopout_response) ? + "Solicited" : "Unsolicited", cmd->init_task_tag, + cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); +} +EXPORT_SYMBOL(iscsit_build_nopin_rsp); + +/* + * Unsolicited NOPIN, either requesting a response or not. + */ +static int iscsit_send_unsolicited_nopin( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn, + int want_response) +{ + struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; + int ret; + + iscsit_build_nopin_rsp(cmd, conn, hdr, false); + + pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" + " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); + + ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); + if (ret < 0) + return ret; + + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = want_response ? 
+ ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + + return 0; +} + +static int +iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; + + iscsit_build_nopin_rsp(cmd, conn, hdr, true); + + /* + * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr. + * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size. + */ + pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size); + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, + cmd->buf_ptr, + cmd->buf_ptr_size); +} + +static int iscsit_send_r2t( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsi_r2t *r2t; + struct iscsi_r2t_rsp *hdr; + int ret; + + r2t = iscsit_get_r2t_from_list(cmd); + if (!r2t) + return -1; + + hdr = (struct iscsi_r2t_rsp *) cmd->pdu; + memset(hdr, 0, ISCSI_HDR_LEN); + hdr->opcode = ISCSI_OP_R2T; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + int_to_scsilun(cmd->se_cmd.orig_fe_lun, + (struct scsi_lun *)&hdr->lun); + hdr->itt = cmd->init_task_tag; + if (conn->conn_transport->iscsit_get_r2t_ttt) + conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t); + else + r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); + hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); + hdr->statsn = cpu_to_be32(conn->stat_sn); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); + hdr->data_offset = cpu_to_be32(r2t->offset); + hdr->data_length = cpu_to_be32(r2t->xfer_len); + + pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" + " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", + (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, + r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, + r2t->offset, r2t->xfer_len, conn->cid); + + spin_lock_bh(&cmd->r2t_lock); + r2t->sent_r2t = 1; + spin_unlock_bh(&cmd->r2t_lock); + + ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); + if (ret < 0) { + return ret; + } + + spin_lock_bh(&cmd->dataout_timeout_lock); + iscsit_start_dataout_timer(cmd, conn); + spin_unlock_bh(&cmd->dataout_timeout_lock); + + return 0; +} + +/* + * @recovery: If called from iscsi_task_reassign_complete_write() for + * connection recovery. 
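+ *
+ * Worked example (DataSequenceInOrder=Yes, recovery=false, EDTL=96k,
+ * MaxBurstLength=32k, MaxOutstandingR2T=2): the first call issues
+ * R2Ts for offsets 0 and 32k, 32k each, then stops at the
+ * outstanding limit; the final 32k R2T is only built on a later call
+ * once an earlier R2T sequence has completed.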
+ */ +int iscsit_build_r2ts_for_cmd( + struct iscsit_conn *conn, + struct iscsit_cmd *cmd, + bool recovery) +{ + int first_r2t = 1; + u32 offset = 0, xfer_len = 0; + + spin_lock_bh(&cmd->r2t_lock); + if (cmd->cmd_flags & ICF_SENT_LAST_R2T) { + spin_unlock_bh(&cmd->r2t_lock); + return 0; + } + + if (conn->sess->sess_ops->DataSequenceInOrder && + !recovery) + cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done); + + while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { + if (conn->sess->sess_ops->DataSequenceInOrder) { + offset = cmd->r2t_offset; + + if (first_r2t && recovery) { + int new_data_end = offset + + conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len; + + if (new_data_end > cmd->se_cmd.data_length) + xfer_len = cmd->se_cmd.data_length - offset; + else + xfer_len = + conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len; + } else { + int new_data_end = offset + + conn->sess->sess_ops->MaxBurstLength; + + if (new_data_end > cmd->se_cmd.data_length) + xfer_len = cmd->se_cmd.data_length - offset; + else + xfer_len = conn->sess->sess_ops->MaxBurstLength; + } + + if ((s32)xfer_len < 0) { + cmd->cmd_flags |= ICF_SENT_LAST_R2T; + break; + } + + cmd->r2t_offset += xfer_len; + + if (cmd->r2t_offset == cmd->se_cmd.data_length) + cmd->cmd_flags |= ICF_SENT_LAST_R2T; + } else { + struct iscsi_seq *seq; + + seq = iscsit_get_seq_holder_for_r2t(cmd); + if (!seq) { + spin_unlock_bh(&cmd->r2t_lock); + return -1; + } + + offset = seq->offset; + xfer_len = seq->xfer_len; + + if (cmd->seq_send_order == cmd->seq_count) + cmd->cmd_flags |= ICF_SENT_LAST_R2T; + } + cmd->outstanding_r2ts++; + first_r2t = 0; + + if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) { + spin_unlock_bh(&cmd->r2t_lock); + return -1; + } + + if (cmd->cmd_flags & ICF_SENT_LAST_R2T) + break; + } + spin_unlock_bh(&cmd->r2t_lock); + + return 0; +} +EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd); + +void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) +{ + if (inc_stat_sn) + cmd->stat_sn = conn->stat_sn++; + + atomic_long_inc(&conn->sess->rsp_pdus); + + memset(hdr, 0, ISCSI_HDR_LEN); + hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { + hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); + } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { + hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); + } + hdr->response = cmd->iscsi_response; + hdr->cmd_status = cmd->se_cmd.scsi_status; + hdr->itt = cmd->init_task_tag; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + + iscsit_increment_maxcmdsn(cmd, conn->sess); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + + pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x," + " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", + cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status, + cmd->se_cmd.scsi_status, conn->cid); +} +EXPORT_SYMBOL(iscsit_build_rsp_pdu); + +static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; + bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); + void *data_buf = NULL; + u32 padding = 0, data_buf_len = 0; + + iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); + + /* + * Attach 
SENSE DATA payload to iSCSI Response PDU + */ + if (cmd->se_cmd.sense_buffer && + ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || + (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { + put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer); + cmd->se_cmd.scsi_sense_length += sizeof (__be16); + + padding = -(cmd->se_cmd.scsi_sense_length) & 3; + hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); + data_buf = cmd->sense_buffer; + data_buf_len = cmd->se_cmd.scsi_sense_length + padding; + + if (padding) { + memset(cmd->sense_buffer + + cmd->se_cmd.scsi_sense_length, 0, padding); + pr_debug("Adding %u bytes of padding to" + " SENSE.\n", padding); + } + + pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" + " Response PDU\n", + cmd->se_cmd.scsi_sense_length); + } + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf, + data_buf_len); +} + +static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) +{ + switch (se_tmr->response) { + case TMR_FUNCTION_COMPLETE: + return ISCSI_TMF_RSP_COMPLETE; + case TMR_TASK_DOES_NOT_EXIST: + return ISCSI_TMF_RSP_NO_TASK; + case TMR_LUN_DOES_NOT_EXIST: + return ISCSI_TMF_RSP_NO_LUN; + case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: + return ISCSI_TMF_RSP_NOT_SUPPORTED; + case TMR_FUNCTION_REJECTED: + default: + return ISCSI_TMF_RSP_REJECTED; + } +} + +void +iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + struct iscsi_tm_rsp *hdr) +{ + struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; + + hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; + hdr->flags = ISCSI_FLAG_CMD_FINAL; + hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); + hdr->itt = cmd->init_task_tag; + cmd->stat_sn = conn->stat_sn++; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + + iscsit_increment_maxcmdsn(cmd, conn->sess); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + + pr_debug("Built Task Management Response ITT: 0x%08x," + " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", + cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); +} +EXPORT_SYMBOL(iscsit_build_task_mgt_rsp); + +static int +iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn) +{ + struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; + + iscsit_build_task_mgt_rsp(cmd, conn, hdr); + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); +} + +#define SENDTARGETS_BUF_LIMIT 32768U + +static int +iscsit_build_sendtargets_response(struct iscsit_cmd *cmd, + enum iscsit_transport_type network_transport, + int skip_bytes, bool *completed) +{ + char *payload = NULL; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_portal_group *tpg; + struct iscsi_tiqn *tiqn; + struct iscsi_tpg_np *tpg_np; + int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; + int target_name_printed; + unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ + unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; + bool active; + + buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength, + SENDTARGETS_BUF_LIMIT); + + payload = kzalloc(buffer_len, GFP_KERNEL); + if (!payload) + return -ENOMEM; + + /* + * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE + * explicit case.. 
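+ *
+ * (The text built below follows the usual discovery format, e.g.
+ *
+ *	TargetName=iqn.2003-01.org.linux-iscsi.target:sn.0\0
+ *	TargetAddress=192.0.2.10:3260,1\0
+ *
+ * one TargetAddress per matching network portal, with continuation
+ * via a valid TTT plus the skip_bytes argument when the response
+ * exceeds one MaxRecvDataSegmentLength buffer; the IQN and address
+ * shown are made-up examples.)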
+ */ + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { + text_ptr = strchr(text_in, '='); + if (!text_ptr) { + pr_err("Unable to locate '=' string in text_in:" + " %s\n", text_in); + kfree(payload); + return -EINVAL; + } + /* + * Skip over the '=' character. + */ + text_ptr += 1; + } + + spin_lock(&tiqn_lock); + list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { + if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && + strcmp(tiqn->tiqn, text_ptr)) { + continue; + } + + target_name_printed = 0; + + spin_lock(&tiqn->tiqn_tpg_lock); + list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + + /* If demo_mode_discovery=0 and generate_node_acls=0 + * (demo mode disabled) do not return + * TargetName+TargetAddress unless a NodeACL exists. + */ + + if ((tpg->tpg_attrib.generate_node_acls == 0) && + (tpg->tpg_attrib.demo_mode_discovery == 0) && + (!target_tpg_has_node_acl(&tpg->tpg_se_tpg, + cmd->conn->sess->sess_ops->InitiatorName))) { + continue; + } + + spin_lock(&tpg->tpg_state_lock); + active = (tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&tpg->tpg_state_lock); + + if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets) + continue; + + spin_lock(&tpg->tpg_np_lock); + list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, + tpg_np_list) { + struct iscsi_np *np = tpg_np->tpg_np; + struct sockaddr_storage *sockaddr; + + if (np->np_network_transport != network_transport) + continue; + + if (!target_name_printed) { + len = sprintf(buf, "TargetName=%s", + tiqn->tiqn); + len += 1; + + if ((len + payload_len) > buffer_len) { + spin_unlock(&tpg->tpg_np_lock); + spin_unlock(&tiqn->tiqn_tpg_lock); + end_of_buf = 1; + goto eob; + } + + if (skip_bytes && len <= skip_bytes) { + skip_bytes -= len; + } else { + memcpy(payload + payload_len, buf, len); + payload_len += len; + target_name_printed = 1; + if (len > skip_bytes) + skip_bytes = 0; + } + } + + if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr)) + sockaddr = &conn->local_sockaddr; + else + sockaddr = &np->np_sockaddr; + + len = sprintf(buf, "TargetAddress=" + "%pISpc,%hu", + sockaddr, + tpg->tpgt); + len += 1; + + if ((len + payload_len) > buffer_len) { + spin_unlock(&tpg->tpg_np_lock); + spin_unlock(&tiqn->tiqn_tpg_lock); + end_of_buf = 1; + goto eob; + } + + if (skip_bytes && len <= skip_bytes) { + skip_bytes -= len; + } else { + memcpy(payload + payload_len, buf, len); + payload_len += len; + if (len > skip_bytes) + skip_bytes = 0; + } + } + spin_unlock(&tpg->tpg_np_lock); + } + spin_unlock(&tiqn->tiqn_tpg_lock); +eob: + if (end_of_buf) { + *completed = false; + break; + } + + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) + break; + } + spin_unlock(&tiqn_lock); + + cmd->buf_ptr = payload; + + return payload_len; +} + +int +iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + struct iscsi_text_rsp *hdr, + enum iscsit_transport_type network_transport) +{ + int text_length, padding; + bool completed = true; + + text_length = iscsit_build_sendtargets_response(cmd, network_transport, + cmd->read_data_done, + &completed); + if (text_length < 0) + return text_length; + + if (completed) { + hdr->flags = ISCSI_FLAG_CMD_FINAL; + } else { + hdr->flags = ISCSI_FLAG_TEXT_CONTINUE; + cmd->read_data_done += text_length; + if (cmd->targ_xfer_tag == 0xFFFFFFFF) + cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); + } + hdr->opcode = ISCSI_OP_TEXT_RSP; + padding = ((-text_length) & 3); + hton24(hdr->dlength, text_length); + hdr->itt = cmd->init_task_tag; + hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); + cmd->stat_sn = conn->stat_sn++; + 
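/* + * Editorial note: each Text Response PDU consumes its own StatSN, even + * when one SendTargets listing spans several continued responses. + */ +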
hdr->statsn = cpu_to_be32(cmd->stat_sn); + + iscsit_increment_maxcmdsn(cmd, conn->sess); + /* + * Reset maxcmdsn_inc in multi-part text payload exchanges to + * correctly increment MaxCmdSN for each response answering a + * non-immediate text request with a valid CmdSN. + */ + cmd->maxcmdsn_inc = 0; + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + + pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x," + " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag, + cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid, + !!(hdr->flags & ISCSI_FLAG_CMD_FINAL), + !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)); + + return text_length + padding; +} +EXPORT_SYMBOL(iscsit_build_text_rsp); + +static int iscsit_send_text_rsp( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; + int text_length; + + text_length = iscsit_build_text_rsp(cmd, conn, hdr, + conn->conn_transport->transport_type); + if (text_length < 0) + return text_length; + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, + cmd->buf_ptr, + text_length); +} + +void +iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn, + struct iscsi_reject *hdr) +{ + hdr->opcode = ISCSI_OP_REJECT; + hdr->reason = cmd->reject_reason; + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + hton24(hdr->dlength, ISCSI_HDR_LEN); + hdr->ffffffff = cpu_to_be32(0xffffffff); + cmd->stat_sn = conn->stat_sn++; + hdr->statsn = cpu_to_be32(cmd->stat_sn); + hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); +} +EXPORT_SYMBOL(iscsit_build_reject); + +static int iscsit_send_reject( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; + + iscsit_build_reject(cmd, conn, hdr); + + pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," + " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); + + return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, + cmd->buf_ptr, + ISCSI_HDR_LEN); +} + +void iscsit_thread_get_cpumask(struct iscsit_conn *conn) +{ + int ord, cpu; + cpumask_var_t conn_allowed_cpumask; + + /* + * bitmap_id is assigned from iscsit_global->ts_bitmap from + * within iscsit_start_kthreads() + * + * Here we use bitmap_id to determine which CPU this + * iSCSI connection's RX/TX threads will be scheduled to + * execute upon. + */ + if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) { + ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); + for_each_online_cpu(cpu) { + if (ord-- == 0) { + cpumask_set_cpu(cpu, conn->conn_cpumask); + return; + } + } + } else { + cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask, + cpu_online_mask); + + cpumask_clear(conn->conn_cpumask); + ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask); + for_each_cpu(cpu, conn_allowed_cpumask) { + if (ord-- == 0) { + cpumask_set_cpu(cpu, conn->conn_cpumask); + free_cpumask_var(conn_allowed_cpumask); + return; + } + } + free_cpumask_var(conn_allowed_cpumask); + } + /* + * This should never be reached. + */ + dump_stack(); + cpumask_setall(conn->conn_cpumask); +} + +static void iscsit_thread_reschedule(struct iscsit_conn *conn) +{ + /* + * If iscsit_global->allowed_cpumask has been modified, reschedule the + * iSCSI connection's RX/TX threads and update conn->allowed_cpumask. 
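+ * + * The global mask is normally driven from configfs, e.g. (illustrative + * path, assuming the cpus_allowed_list attribute is available): + * + * echo 0-3 > /sys/kernel/config/target/iscsi/cpus_allowed_list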
+ */ + if (!cpumask_equal(iscsit_global->allowed_cpumask, + conn->allowed_cpumask)) { + iscsit_thread_get_cpumask(conn); + conn->conn_tx_reset_cpumask = 1; + conn->conn_rx_reset_cpumask = 1; + cpumask_copy(conn->allowed_cpumask, + iscsit_global->allowed_cpumask); + } +} + +void iscsit_thread_check_cpumask( + struct iscsit_conn *conn, + struct task_struct *p, + int mode) +{ + /* + * The TX and RX threads may call iscsit_thread_check_cpumask() + * at the same time. The RX thread might be faster and return from + * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0. + * Then the TX thread sets it back to 1. + * The next time the RX thread loops, it sees conn_rx_reset_cpumask + * set to 1, calls set_cpus_allowed_ptr() again and sets it to 0. + */ + iscsit_thread_reschedule(conn); + + /* + * mode == 1 signals iscsi_target_tx_thread() usage. + * mode == 0 signals iscsi_target_rx_thread() usage. + */ + if (mode == 1) { + if (!conn->conn_tx_reset_cpumask) + return; + } else { + if (!conn->conn_rx_reset_cpumask) + return; + } + + /* + * Update the CPU mask for this single kthread so that + * both TX and RX kthreads are scheduled to run on the + * same CPU. + */ + set_cpus_allowed_ptr(p, conn->conn_cpumask); + if (mode == 1) + conn->conn_tx_reset_cpumask = 0; + else + conn->conn_rx_reset_cpumask = 0; +} +EXPORT_SYMBOL(iscsit_thread_check_cpumask); + +int +iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state) +{ + int ret; + + switch (state) { + case ISTATE_SEND_R2T: + ret = iscsit_send_r2t(cmd, conn); + if (ret < 0) + goto err; + break; + case ISTATE_REMOVE: + spin_lock_bh(&conn->cmd_lock); + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_free_cmd(cmd, false); + break; + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + iscsit_mod_nopin_response_timer(conn); + ret = iscsit_send_unsolicited_nopin(cmd, conn, 1); + if (ret < 0) + goto err; + break; + case ISTATE_SEND_NOPIN_NO_RESPONSE: + ret = iscsit_send_unsolicited_nopin(cmd, conn, 0); + if (ret < 0) + goto err; + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, state, + conn->cid); + goto err; + } + + return 0; + +err: + return -1; +} +EXPORT_SYMBOL(iscsit_immediate_queue); + +static int +iscsit_handle_immediate_queue(struct iscsit_conn *conn) +{ + struct iscsit_transport *t = conn->conn_transport; + struct iscsi_queue_req *qr; + struct iscsit_cmd *cmd; + u8 state; + int ret; + + while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { + atomic_set(&conn->check_immediate_queue, 0); + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + + ret = t->iscsit_immediate_queue(conn, cmd, state); + if (ret < 0) + return ret; + } + + return 0; +} + +int +iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state) +{ + int ret; + +check_rsp_state: + switch (state) { + case ISTATE_SEND_DATAIN: + ret = iscsit_send_datain(cmd, conn); + if (ret < 0) + goto err; + else if (!ret) + /* more drs (datain requests) to send */ + goto check_rsp_state; + else if (ret == 1) { + /* all done */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + + if (atomic_read(&conn->check_immediate_queue)) + return 1; + + return 0; + } else if (ret == 2) { + /* Still must send status, + SCF_TRANSPORT_TASK_SENSE was set */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SEND_STATUS; + spin_unlock_bh(&cmd->istate_lock); + state = 
ISTATE_SEND_STATUS; + goto check_rsp_state; + } + + break; + case ISTATE_SEND_STATUS: + case ISTATE_SEND_STATUS_RECOVERY: + ret = iscsit_send_response(cmd, conn); + break; + case ISTATE_SEND_LOGOUTRSP: + ret = iscsit_send_logout(cmd, conn); + break; + case ISTATE_SEND_ASYNCMSG: + ret = iscsit_send_conn_drop_async_message( + cmd, conn); + break; + case ISTATE_SEND_NOPIN: + ret = iscsit_send_nopin(cmd, conn); + break; + case ISTATE_SEND_REJECT: + ret = iscsit_send_reject(cmd, conn); + break; + case ISTATE_SEND_TASKMGTRSP: + ret = iscsit_send_task_mgt_rsp(cmd, conn); + if (ret != 0) + break; + ret = iscsit_tmr_post_handler(cmd, conn); + if (ret != 0) + iscsit_fall_back_to_erl0(conn->sess); + break; + case ISTATE_SEND_TEXTRSP: + ret = iscsit_send_text_rsp(cmd, conn); + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + state, conn->cid); + goto err; + } + if (ret < 0) + goto err; + + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + if (!iscsit_logout_post_handler(cmd, conn)) + return -ECONNRESET; + fallthrough; + case ISTATE_SEND_STATUS: + case ISTATE_SEND_ASYNCMSG: + case ISTATE_SEND_NOPIN: + case ISTATE_SEND_STATUS_RECOVERY: + case ISTATE_SEND_TEXTRSP: + case ISTATE_SEND_TASKMGTRSP: + case ISTATE_SEND_REJECT: + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + cmd->i_state, conn->cid); + goto err; + } + + if (atomic_read(&conn->check_immediate_queue)) + return 1; + + return 0; + +err: + return -1; +} +EXPORT_SYMBOL(iscsit_response_queue); + +static int iscsit_handle_response_queue(struct iscsit_conn *conn) +{ + struct iscsit_transport *t = conn->conn_transport; + struct iscsi_queue_req *qr; + struct iscsit_cmd *cmd; + u8 state; + int ret; + + while ((qr = iscsit_get_cmd_from_response_queue(conn))) { + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + + ret = t->iscsit_response_queue(conn, cmd, state); + if (ret == 1 || ret < 0) + return ret; + } + + return 0; +} + +int iscsi_target_tx_thread(void *arg) +{ + int ret = 0; + struct iscsit_conn *conn = arg; + bool conn_freed = false; + + /* + * Allow ourselves to be interrupted by SIGINT so that a + * connection recovery / failure event can be triggered externally. + */ + allow_signal(SIGINT); + + while (!kthread_should_stop()) { + /* + * Ensure that both TX and RX per connection kthreads + * are scheduled to run on the same CPU. + */ + iscsit_thread_check_cpumask(conn, current, 1); + + wait_event_interruptible(conn->queues_wq, + !iscsit_conn_all_queues_empty(conn)); + + if (signal_pending(current)) + goto transport_err; + +get_immediate: + ret = iscsit_handle_immediate_queue(conn); + if (ret < 0) + goto transport_err; + + ret = iscsit_handle_response_queue(conn); + if (ret == 1) { + goto get_immediate; + } else if (ret == -ECONNRESET) { + conn_freed = true; + goto out; + } else if (ret < 0) { + goto transport_err; + } + } + +transport_err: + /* + * Avoid the normal connection failure code-path if this connection + * is still within LOGIN mode, and iscsi_np process context is + * responsible for cleaning up the early connection failure. 
+ */ + if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) + iscsit_take_action_for_connection_exit(conn, &conn_freed); +out: + if (!conn_freed) { + while (!kthread_should_stop()) { + msleep(100); + } + } + return 0; +} + +static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf) +{ + struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf; + struct iscsit_cmd *cmd; + int ret = 0; + + switch (hdr->opcode & ISCSI_OPCODE_MASK) { + case ISCSI_OP_SCSI_CMD: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_scsi_cmd(conn, cmd, buf); + break; + case ISCSI_OP_SCSI_DATA_OUT: + ret = iscsit_handle_data_out(conn, buf); + break; + case ISCSI_OP_NOOP_OUT: + cmd = NULL; + if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + } + ret = iscsit_handle_nop_out(conn, cmd, buf); + break; + case ISCSI_OP_SCSI_TMFUNC: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); + break; + case ISCSI_OP_TEXT: + if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { + cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); + if (!cmd) + goto reject; + } else { + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + } + + ret = iscsit_handle_text_cmd(conn, cmd, buf); + break; + case ISCSI_OP_LOGOUT: + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + + ret = iscsit_handle_logout_cmd(conn, cmd, buf); + if (ret > 0) + wait_for_completion_timeout(&conn->conn_logout_comp, + SECONDS_FOR_LOGOUT_COMP * HZ); + break; + case ISCSI_OP_SNACK: + ret = iscsit_handle_snack(conn, buf); + break; + default: + pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode); + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Cannot recover from unknown" + " opcode while ERL=0, closing iSCSI connection.\n"); + return -1; + } + pr_err("Unable to recover from unknown opcode while OFMarker=No," + " closing iSCSI connection.\n"); + ret = -1; + break; + } + + return ret; +reject: + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); +} + +static bool iscsi_target_check_conn_state(struct iscsit_conn *conn) +{ + bool ret; + + spin_lock_bh(&conn->state_lock); + ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); + spin_unlock_bh(&conn->state_lock); + + return ret; +} + +static void iscsit_get_rx_pdu(struct iscsit_conn *conn) +{ + int ret; + u8 *buffer, *tmp_buf, opcode; + u32 checksum = 0, digest = 0; + struct iscsi_hdr *hdr; + struct kvec iov; + + buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return; + + while (!kthread_should_stop()) { + /* + * Ensure that both TX and RX per connection kthreads + * are scheduled to run on the same CPU. 
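+ * (mode 0 marks the RX side here; the TX loop above passes mode 1).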
+ */ + iscsit_thread_check_cpumask(conn, current, 0); + + memset(&iov, 0, sizeof(struct kvec)); + + iov.iov_base = buffer; + iov.iov_len = ISCSI_HDR_LEN; + + ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); + if (ret != ISCSI_HDR_LEN) { + iscsit_rx_thread_wait_for_tcp(conn); + break; + } + + hdr = (struct iscsi_hdr *) buffer; + if (hdr->hlength) { + iov.iov_len = hdr->hlength * 4; + tmp_buf = krealloc(buffer, + ISCSI_HDR_LEN + iov.iov_len, + GFP_KERNEL); + if (!tmp_buf) + break; + + buffer = tmp_buf; + iov.iov_base = &buffer[ISCSI_HDR_LEN]; + + ret = rx_data(conn, &iov, 1, iov.iov_len); + if (ret != iov.iov_len) { + iscsit_rx_thread_wait_for_tcp(conn); + break; + } + } + + if (conn->conn_ops->HeaderDigest) { + iov.iov_base = &digest; + iov.iov_len = ISCSI_CRC_LEN; + + ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); + if (ret != ISCSI_CRC_LEN) { + iscsit_rx_thread_wait_for_tcp(conn); + break; + } + + iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, + ISCSI_HDR_LEN, 0, NULL, + &checksum); + + if (digest != checksum) { + pr_err("HeaderDigest CRC32C failed," + " received 0x%08x, computed 0x%08x\n", + digest, checksum); + /* + * Set the PDU to 0xff so it will intentionally + * hit default in the switch below. + */ + memset(buffer, 0xff, ISCSI_HDR_LEN); + atomic_long_inc(&conn->sess->conn_digest_errors); + } else { + pr_debug("Got HeaderDigest CRC32C" + " 0x%08x\n", checksum); + } + } + + if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) + break; + + opcode = buffer[0] & ISCSI_OPCODE_MASK; + + if (conn->sess->sess_ops->SessionType && + ((!(opcode & ISCSI_OP_TEXT)) || + (!(opcode & ISCSI_OP_LOGOUT)))) { + pr_err("Received illegal iSCSI Opcode: 0x%02x" + " while in Discovery Session, rejecting.\n", opcode); + iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + buffer); + break; + } + + ret = iscsi_target_rx_opcode(conn, buffer); + if (ret < 0) + break; + } + + kfree(buffer); +} + +int iscsi_target_rx_thread(void *arg) +{ + int rc; + struct iscsit_conn *conn = arg; + bool conn_freed = false; + + /* + * Allow ourselves to be interrupted by SIGINT so that a + * connection recovery / failure event can be triggered externally. + */ + allow_signal(SIGINT); + /* + * Wait for iscsi_post_login_handler() to complete before allowing + * incoming iscsi/tcp socket I/O, and/or failing the connection. + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); + if (rc < 0 || iscsi_target_check_conn_state(conn)) + goto out; + + if (!conn->conn_transport->iscsit_get_rx_pdu) + return 0; + + conn->conn_transport->iscsit_get_rx_pdu(conn); + + if (!signal_pending(current)) + atomic_set(&conn->transport_failed, 1); + iscsit_take_action_for_connection_exit(conn, &conn_freed); + +out: + if (!conn_freed) { + while (!kthread_should_stop()) { + msleep(100); + } + } + + return 0; +} + +static void iscsit_release_commands_from_conn(struct iscsit_conn *conn) +{ + LIST_HEAD(tmp_list); + struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL; + struct iscsit_session *sess = conn->sess; + /* + * We expect this function to only ever be called from either RX or TX + * thread context via iscsit_close_connection() once the other context + * has been reset -> returned sleeping pre-handler state. 
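+ * + * The commands are first spliced onto a local list under cmd_lock below, + * so nothing new can be queued to the connection while each se_cmd is + * examined.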
+ */ + spin_lock_bh(&conn->cmd_lock); + list_splice_init(&conn->conn_cmd_list, &tmp_list); + + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (!se_cmd->se_tfo) + continue; + + spin_lock_irq(&se_cmd->t_state_lock); + if (se_cmd->transport_state & CMD_T_ABORTED) { + if (!(se_cmd->transport_state & CMD_T_TAS)) + /* + * LIO's abort path owns the cleanup for this, + * so put it back on the list and let + * aborted_task handle it. + */ + list_move_tail(&cmd->i_conn_node, + &conn->conn_cmd_list); + } else { + se_cmd->transport_state |= CMD_T_FABRIC_STOP; + } + + if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { + /* + * We never submitted the cmd to LIO core, so we have + * to tell LIO to perform the completion process. + */ + spin_unlock_irq(&se_cmd->t_state_lock); + target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED); + continue; + } + spin_unlock_irq(&se_cmd->t_state_lock); + } + spin_unlock_bh(&conn->cmd_lock); + + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { + list_del_init(&cmd->i_conn_node); + + iscsit_increment_maxcmdsn(cmd, sess); + iscsit_free_cmd(cmd, true); + + } + + /* + * Wait on commands that were cleaned up via the aborted_task path. + * LLDs that implement iscsit_wait_conn will already have waited for + * commands. + */ + if (!conn->conn_transport->iscsit_wait_conn) { + target_stop_cmd_counter(conn->cmd_cnt); + target_wait_for_cmds(conn->cmd_cnt); + } +} + +static void iscsit_stop_timers_for_cmds( + struct iscsit_conn *conn) +{ + struct iscsit_cmd *cmd; + + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); + } + spin_unlock_bh(&conn->cmd_lock); +} + +int iscsit_close_connection( + struct iscsit_conn *conn) +{ + int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT); + struct iscsit_session *sess = conn->sess; + + pr_debug("Closing iSCSI connection CID %hu on SID:" + " %u\n", conn->cid, sess->sid); + /* + * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD + * case just in case the RX Thread in iscsi_target_rx_opcode() is + * sleeping and the logout response never got sent because the + * connection failed. + * + * However for iser-target, isert_wait4logout() is using conn_logout_comp + * to signal logout response TX interrupt completion. Go ahead and skip + * this for iser since isert_rx_opcode() does not wait on logout failure, + * and to avoid iscsit_conn pointer dereference in iser-target code. 
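+ * (rdma_shutdown is the flag RDMA transports such as isert set; it is + * false for plain iscsi/tcp, so the complete() below runs there).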
+ */ + if (!conn->conn_transport->rdma_shutdown) + complete(&conn->conn_logout_comp); + + if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { + if (conn->tx_thread && + cmpxchg(&conn->tx_thread_active, true, false)) { + send_sig(SIGINT, conn->tx_thread, 1); + kthread_stop(conn->tx_thread); + } + } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { + if (conn->rx_thread && + cmpxchg(&conn->rx_thread_active, true, false)) { + send_sig(SIGINT, conn->rx_thread, 1); + kthread_stop(conn->rx_thread); + } + } + + spin_lock(&iscsit_global->ts_bitmap_lock); + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, + get_order(1)); + spin_unlock(&iscsit_global->ts_bitmap_lock); + + iscsit_stop_timers_for_cmds(conn); + iscsit_stop_nopin_response_timer(conn); + iscsit_stop_nopin_timer(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + + /* + * During Connection recovery drop unacknowledged out of order + * commands for this connection, and prepare the other commands + * for reallegiance. + * + * During normal operation clear the out of order commands (but + * do not free the struct iscsi_ooo_cmdsn's) and release all + * struct iscsit_cmds. + */ + if (atomic_read(&conn->connection_recovery)) { + iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); + iscsit_prepare_cmds_for_reallegiance(conn); + } else { + iscsit_clear_ooo_cmdsns_for_conn(conn); + iscsit_release_commands_from_conn(conn); + } + iscsit_free_queue_reqs_for_conn(conn); + + /* + * Handle decrementing session or connection usage count if + * a logout response was not able to be sent because the + * connection failed. Fall back to Session Recovery here. + */ + if (atomic_read(&conn->conn_logout_remove)) { + if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) { + iscsit_dec_conn_usage_count(conn); + iscsit_dec_session_usage_count(sess); + } + if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) + iscsit_dec_conn_usage_count(conn); + + atomic_set(&conn->conn_logout_remove, 0); + atomic_set(&sess->session_reinstatement, 0); + atomic_set(&sess->session_fall_back_to_erl0, 1); + } + + spin_lock_bh(&sess->conn_lock); + list_del(&conn->conn_list); + + /* + * Attempt to let the Initiator know this connection failed by + * sending a Connection Dropped Async Message on another + * active connection. + */ + if (atomic_read(&conn->connection_recovery)) + iscsit_build_conn_drop_async_message(conn); + + spin_unlock_bh(&sess->conn_lock); + + /* + * If connection reinstatement is being performed on this connection, + * up the connection reinstatement semaphore that is being blocked on + * in iscsit_cause_connection_reinstatement(). + */ + spin_lock_bh(&conn->state_lock); + if (atomic_read(&conn->sleep_on_conn_wait_comp)) { + spin_unlock_bh(&conn->state_lock); + complete(&conn->conn_wait_comp); + wait_for_completion(&conn->conn_post_wait_comp); + spin_lock_bh(&conn->state_lock); + } + + /* + * If connection reinstatement is being performed on this connection + * by receiving a REMOVECONNFORRECOVERY logout request, up the + * connection wait rcfr semaphore that is being blocked on + * in iscsit_connection_reinstatement_rcfr(). 
+ */ + if (atomic_read(&conn->connection_wait_rcfr)) { + spin_unlock_bh(&conn->state_lock); + complete(&conn->conn_wait_rcfr_comp); + wait_for_completion(&conn->conn_post_wait_comp); + spin_lock_bh(&conn->state_lock); + } + atomic_set(&conn->connection_reinstatement, 1); + spin_unlock_bh(&conn->state_lock); + + /* + * If any other processes are accessing this connection pointer we + * must wait until they have completed. + */ + iscsit_check_conn_usage_count(conn); + + ahash_request_free(conn->conn_tx_hash); + if (conn->conn_rx_hash) { + struct crypto_ahash *tfm; + + tfm = crypto_ahash_reqtfm(conn->conn_rx_hash); + ahash_request_free(conn->conn_rx_hash); + crypto_free_ahash(tfm); + } + + if (conn->sock) + sock_release(conn->sock); + + if (conn->conn_transport->iscsit_free_conn) + conn->conn_transport->iscsit_free_conn(conn); + + pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); + conn->conn_state = TARG_CONN_STATE_FREE; + iscsit_free_conn(conn); + + spin_lock_bh(&sess->conn_lock); + atomic_dec(&sess->nconn); + pr_debug("Decremented iSCSI connection count to %d from node:" + " %s\n", atomic_read(&sess->nconn), + sess->sess_ops->InitiatorName); + /* + * Make sure that if one connection fails in a non-ERL=2 iSCSI + * Session, they all fail. + */ + if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout && + !atomic_read(&sess->session_logout)) + atomic_set(&sess->session_fall_back_to_erl0, 1); + + /* + * If this was not the last connection in the session, and we are + * performing session reinstatement or falling back to ERL=0, call + * iscsit_stop_session() without sleeping to shut down the other + * active connections. + */ + if (atomic_read(&sess->nconn)) { + if (!atomic_read(&sess->session_reinstatement) && + !atomic_read(&sess->session_fall_back_to_erl0)) { + spin_unlock_bh(&sess->conn_lock); + return 0; + } + if (!atomic_read(&sess->session_stop_active)) { + atomic_set(&sess->session_stop_active, 1); + spin_unlock_bh(&sess->conn_lock); + iscsit_stop_session(sess, 0, 0); + return 0; + } + spin_unlock_bh(&sess->conn_lock); + return 0; + } + + /* + * If this was the last connection in the session and one of the + * following is occurring: + * + * Session Reinstatement is not being performed, and we are falling back + * to ERL=0: call iscsit_close_session(). + * + * Session Logout was requested. iscsit_close_session() will be called + * elsewhere. + * + * Session Continuation is not being performed, start the Time2Retain + * handler and check if sleep_on_sess_wait_sem is active. 
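+ * (the if/else-if/else chain below handles these three cases in the + * same order).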
+ */ + if (!atomic_read(&sess->session_reinstatement) && + atomic_read(&sess->session_fall_back_to_erl0)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess, true); + + return 0; + } else if (atomic_read(&sess->session_logout)) { + pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); + sess->session_state = TARG_SESS_STATE_FREE; + + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess, true); + } else { + spin_unlock_bh(&sess->conn_lock); + } + + return 0; + } else { + pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); + sess->session_state = TARG_SESS_STATE_FAILED; + + if (!atomic_read(&sess->session_continuation)) + iscsit_start_time2retain_handler(sess); + + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess, true); + } else { + spin_unlock_bh(&sess->conn_lock); + } + + return 0; + } +} + +/* + * If the iSCSI Session for the iSCSI Initiator Node exists, + * forcefully shut down the iSCSI NEXUS. + */ +int iscsit_close_session(struct iscsit_session *sess, bool can_sleep) +{ + struct iscsi_portal_group *tpg = sess->tpg; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + + if (atomic_read(&sess->nconn)) { + pr_err("%d connection(s) still exist for iSCSI session" + " to %s\n", atomic_read(&sess->nconn), + sess->sess_ops->InitiatorName); + BUG(); + } + + spin_lock_bh(&se_tpg->session_lock); + atomic_set(&sess->session_logout, 1); + atomic_set(&sess->session_reinstatement, 1); + iscsit_stop_time2retain_timer(sess); + spin_unlock_bh(&se_tpg->session_lock); + + if (sess->sess_ops->ErrorRecoveryLevel == 2) + iscsit_free_connection_recovery_entries(sess); + + /* + * transport_deregister_session_configfs() will clear the + * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process + * context can be setting it again with __transport_register_session() + * in iscsi_post_login_handler() after iscsit_stop_session() + * completes in iscsi_np context. + */ + transport_deregister_session_configfs(sess->se_sess); + + /* + * If any other processes are accessing this session pointer we must + * wait until they have completed. If we are in interrupt context (the + * time2retain handler) and an active session usage count is held, we + * restart the timer and exit. 
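+ * (iscsit_check_session_usage_count() returns nonzero in exactly that + * busy, cannot-sleep case, which the branch below handles).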
+ */ + if (iscsit_check_session_usage_count(sess, can_sleep)) { + atomic_set(&sess->session_logout, 0); + iscsit_start_time2retain_handler(sess); + return 0; + } + + transport_deregister_session(sess->se_sess); + + iscsit_free_all_ooo_cmdsns(sess); + + spin_lock_bh(&se_tpg->session_lock); + pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); + sess->session_state = TARG_SESS_STATE_FREE; + pr_debug("Released iSCSI session from node: %s\n", + sess->sess_ops->InitiatorName); + tpg->nsessions--; + if (tpg->tpg_tiqn) + tpg->tpg_tiqn->tiqn_nsessions--; + + pr_debug("Decremented number of active iSCSI Sessions on" + " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions); + + ida_free(&sess_ida, sess->session_index); + kfree(sess->sess_ops); + sess->sess_ops = NULL; + spin_unlock_bh(&se_tpg->session_lock); + + kfree(sess); + return 0; +} + +static void iscsit_logout_post_handler_closesession( + struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + int sleep = 1; + /* + * Traditional iscsi/tcp will invoke this logic from TX thread + * context during session logout, so clear tx_thread_active and + * sleep if iscsit_close_connection() has not already occurred. + * + * Since iser-target invokes this logic from its own workqueue, + * always sleep waiting for RX/TX thread shutdown to complete + * within iscsit_close_connection(). + */ + if (!conn->conn_transport->rdma_shutdown) { + sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); + + iscsit_dec_conn_usage_count(conn); + atomic_set(&sess->session_close, 1); + iscsit_stop_session(sess, sleep, sleep); + iscsit_dec_session_usage_count(sess); +} + +static void iscsit_logout_post_handler_samecid( + struct iscsit_conn *conn) +{ + int sleep = 1; + + if (!conn->conn_transport->rdma_shutdown) { + sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); + + iscsit_cause_connection_reinstatement(conn, sleep); + iscsit_dec_conn_usage_count(conn); +} + +static void iscsit_logout_post_handler_diffcid( + struct iscsit_conn *conn, + u16 cid) +{ + struct iscsit_conn *l_conn; + struct iscsit_session *sess = conn->sess; + bool conn_found = false; + + if (!sess) + return; + + spin_lock_bh(&sess->conn_lock); + list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { + if (l_conn->cid == cid) { + iscsit_inc_conn_usage_count(l_conn); + conn_found = true; + break; + } + } + spin_unlock_bh(&sess->conn_lock); + + if (!conn_found) + return; + + if (l_conn->sock) + l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN); + + spin_lock_bh(&l_conn->state_lock); + pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); + l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; + spin_unlock_bh(&l_conn->state_lock); + + iscsit_cause_connection_reinstatement(l_conn, 1); + iscsit_dec_conn_usage_count(l_conn); +} + +/* + * Return of 0 causes the TX thread to restart. 
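+ * A return of 1 lets iscsit_response_queue() keep processing; a return + * of 0 is turned into -ECONNRESET there so the TX thread stops touching + * the connection.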
+ */ +int iscsit_logout_post_handler( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + int ret = 0; + + switch (cmd->logout_reason) { + case ISCSI_LOGOUT_REASON_CLOSE_SESSION: + switch (cmd->logout_response) { + case ISCSI_LOGOUT_SUCCESS: + case ISCSI_LOGOUT_CLEANUP_FAILED: + default: + iscsit_logout_post_handler_closesession(conn); + break; + } + break; + case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: + if (conn->cid == cmd->logout_cid) { + switch (cmd->logout_response) { + case ISCSI_LOGOUT_SUCCESS: + case ISCSI_LOGOUT_CLEANUP_FAILED: + default: + iscsit_logout_post_handler_samecid(conn); + break; + } + } else { + switch (cmd->logout_response) { + case ISCSI_LOGOUT_SUCCESS: + iscsit_logout_post_handler_diffcid(conn, + cmd->logout_cid); + break; + case ISCSI_LOGOUT_CID_NOT_FOUND: + case ISCSI_LOGOUT_CLEANUP_FAILED: + default: + break; + } + ret = 1; + } + break; + case ISCSI_LOGOUT_REASON_RECOVERY: + switch (cmd->logout_response) { + case ISCSI_LOGOUT_SUCCESS: + case ISCSI_LOGOUT_CID_NOT_FOUND: + case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: + case ISCSI_LOGOUT_CLEANUP_FAILED: + default: + break; + } + ret = 1; + break; + default: + break; + + } + return ret; +} +EXPORT_SYMBOL(iscsit_logout_post_handler); + +void iscsit_fail_session(struct iscsit_session *sess) +{ + struct iscsit_conn *conn; + + spin_lock_bh(&sess->conn_lock); + list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { + pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); + conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; + } + spin_unlock_bh(&sess->conn_lock); + + pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); + sess->session_state = TARG_SESS_STATE_FAILED; +} + +void iscsit_stop_session( + struct iscsit_session *sess, + int session_sleep, + int connection_sleep) +{ + u16 conn_count = atomic_read(&sess->nconn); + struct iscsit_conn *conn, *conn_tmp = NULL; + int is_last; + + spin_lock_bh(&sess->conn_lock); + + if (connection_sleep) { + list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, + conn_list) { + if (conn_count == 0) + break; + + if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { + is_last = 1; + } else { + iscsit_inc_conn_usage_count(conn_tmp); + is_last = 0; + } + iscsit_inc_conn_usage_count(conn); + + spin_unlock_bh(&sess->conn_lock); + iscsit_cause_connection_reinstatement(conn, 1); + spin_lock_bh(&sess->conn_lock); + + iscsit_dec_conn_usage_count(conn); + if (is_last == 0) + iscsit_dec_conn_usage_count(conn_tmp); + conn_count--; + } + } else { + list_for_each_entry(conn, &sess->sess_conn_list, conn_list) + iscsit_cause_connection_reinstatement(conn, 0); + } + + if (session_sleep && atomic_read(&sess->nconn)) { + spin_unlock_bh(&sess->conn_lock); + wait_for_completion(&sess->session_wait_comp); + } else + spin_unlock_bh(&sess->conn_lock); +} + +int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) +{ + struct iscsit_session *sess; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + struct se_session *se_sess, *se_sess_tmp; + LIST_HEAD(free_list); + int session_count = 0; + + spin_lock_bh(&se_tpg->session_lock); + if (tpg->nsessions && !force) { + spin_unlock_bh(&se_tpg->session_lock); + return -1; + } + + list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, + sess_list) { + sess = (struct iscsit_session *)se_sess->fabric_sess_ptr; + + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || + (sess->time2retain_timer_flags & 
ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); + continue; + } + iscsit_inc_session_usage_count(sess); + atomic_set(&sess->session_reinstatement, 1); + atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); + spin_unlock(&sess->conn_lock); + + list_move_tail(&se_sess->sess_list, &free_list); + } + spin_unlock_bh(&se_tpg->session_lock); + + list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { + sess = (struct iscsit_session *)se_sess->fabric_sess_ptr; + + list_del_init(&se_sess->sess_list); + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); + session_count++; + } + + pr_debug("Released %d iSCSI Session(s) from Target Portal" + " Group: %hu\n", session_count, tpg->tpgt); + return 0; +} + +MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure"); +MODULE_VERSION("4.1.x"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(iscsi_target_init_module); +module_exit(iscsi_target_cleanup_module); diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h new file mode 100644 index 0000000000..0c997a08ad --- /dev/null +++ b/drivers/target/iscsi/iscsi_target.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_H +#define ISCSI_TARGET_H + +#include <linux/types.h> +#include <linux/spinlock.h> + +struct iscsit_cmd; +struct iscsit_conn; +struct iscsi_np; +struct iscsi_portal_group; +struct iscsit_session; +struct iscsi_tpg_np; +struct kref; +struct sockaddr_storage; + +extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *); +extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int); +extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *); +extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *); +extern void iscsit_del_tiqn(struct iscsi_tiqn *); +extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *); +extern void iscsit_login_kref_put(struct kref *); +extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *, + struct iscsi_tpg_np *); +extern bool iscsit_check_np_match(struct sockaddr_storage *, + struct iscsi_np *, int); +extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *, + int); +extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *, + struct iscsi_portal_group *, bool); +extern int iscsit_del_np(struct iscsi_np *); +extern int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8, unsigned char *); +extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *); +extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *); +extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *); +extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *); +extern int iscsit_send_async_msg(struct iscsit_conn *, u16, u8, u8); +extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery); +extern void iscsit_thread_get_cpumask(struct iscsit_conn *); +extern int iscsi_target_tx_thread(void *); +extern int iscsi_target_rx_thread(void *); +extern int iscsit_close_connection(struct iscsit_conn *); +extern int iscsit_close_session(struct iscsit_session *, bool can_sleep); +extern void iscsit_fail_session(struct iscsit_session *); +extern void iscsit_stop_session(struct iscsit_session *, int, int); +extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); + +extern struct iscsit_global *iscsit_global; +extern const 
struct target_core_fabric_ops iscsi_ops; + +extern struct kmem_cache *lio_dr_cache; +extern struct kmem_cache *lio_ooo_cache; +extern struct kmem_cache *lio_qr_cache; +extern struct kmem_cache *lio_r2t_cache; + +extern struct ida sess_ida; +extern struct mutex auth_id_lock; + +#endif /*** ISCSI_TARGET_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c new file mode 100644 index 0000000000..c8a248bd11 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file houses the main functions for the iSCSI CHAP support + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <crypto/hash.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/err.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_nego.h" +#include "iscsi_target_auth.h" + +static char *chap_get_digest_name(const int digest_type) +{ + switch (digest_type) { + case CHAP_DIGEST_MD5: + return "md5"; + case CHAP_DIGEST_SHA1: + return "sha1"; + case CHAP_DIGEST_SHA256: + return "sha256"; + case CHAP_DIGEST_SHA3_256: + return "sha3-256"; + default: + return NULL; + } +} + +static int chap_gen_challenge( + struct iscsit_conn *conn, + int caller, + char *c_str, + unsigned int *c_len) +{ + int ret; + unsigned char *challenge_asciihex; + struct iscsi_chap *chap = conn->auth_protocol; + + challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL); + if (!challenge_asciihex) + return -ENOMEM; + + memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN); + + ret = get_random_bytes_wait(chap->challenge, chap->challenge_len); + if (unlikely(ret)) + goto out; + + bin2hex(challenge_asciihex, chap->challenge, + chap->challenge_len); + /* + * Set CHAP_C, and copy the generated challenge into c_str. + */ + *c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex); + *c_len += 1; + + pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? 
"server" : "client", + challenge_asciihex); + +out: + kfree(challenge_asciihex); + return ret; +} + +static int chap_test_algorithm(const char *name) +{ + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash(name, 0, 0); + if (IS_ERR(tfm)) + return -1; + + crypto_free_shash(tfm); + return 0; +} + +static int chap_check_algorithm(const char *a_str) +{ + char *tmp, *orig, *token, *digest_name; + long digest_type; + int r = CHAP_DIGEST_UNKNOWN; + + tmp = kstrdup(a_str, GFP_KERNEL); + if (!tmp) { + pr_err("Memory allocation failed for CHAP_A temporary buffer\n"); + return CHAP_DIGEST_UNKNOWN; + } + orig = tmp; + + token = strsep(&tmp, "="); + if (!token) + goto out; + + if (strcmp(token, "CHAP_A")) { + pr_err("Unable to locate CHAP_A key\n"); + goto out; + } + while (token) { + token = strsep(&tmp, ","); + if (!token) + goto out; + + if (kstrtol(token, 10, &digest_type)) + continue; + + digest_name = chap_get_digest_name(digest_type); + if (!digest_name) + continue; + + pr_debug("Selected %s Algorithm\n", digest_name); + if (chap_test_algorithm(digest_name) < 0) { + pr_err("failed to allocate %s algo\n", digest_name); + } else { + r = digest_type; + goto out; + } + } +out: + kfree(orig); + return r; +} + +static void chap_close(struct iscsit_conn *conn) +{ + kfree(conn->auth_protocol); + conn->auth_protocol = NULL; +} + +static struct iscsi_chap *chap_server_open( + struct iscsit_conn *conn, + struct iscsi_node_auth *auth, + const char *a_str, + char *aic_str, + unsigned int *aic_len) +{ + int digest_type; + struct iscsi_chap *chap; + + if (!(auth->naf_flags & NAF_USERID_SET) || + !(auth->naf_flags & NAF_PASSWORD_SET)) { + pr_err("CHAP user or password not set for" + " Initiator ACL\n"); + return NULL; + } + + conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL); + if (!conn->auth_protocol) + return NULL; + + chap = conn->auth_protocol; + digest_type = chap_check_algorithm(a_str); + switch (digest_type) { + case CHAP_DIGEST_MD5: + chap->digest_size = MD5_SIGNATURE_SIZE; + break; + case CHAP_DIGEST_SHA1: + chap->digest_size = SHA1_SIGNATURE_SIZE; + break; + case CHAP_DIGEST_SHA256: + chap->digest_size = SHA256_SIGNATURE_SIZE; + break; + case CHAP_DIGEST_SHA3_256: + chap->digest_size = SHA3_256_SIGNATURE_SIZE; + break; + case CHAP_DIGEST_UNKNOWN: + default: + pr_err("Unsupported CHAP_A value\n"); + chap_close(conn); + return NULL; + } + + chap->digest_name = chap_get_digest_name(digest_type); + + /* Tie the challenge length to the digest size */ + chap->challenge_len = chap->digest_size; + + pr_debug("[server] Got CHAP_A=%d\n", digest_type); + *aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type); + *aic_len += 1; + pr_debug("[server] Sending CHAP_A=%d\n", digest_type); + + /* + * Set Identifier. + */ + chap->id = conn->tpg->tpg_chap_id++; + *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id); + *aic_len += 1; + pr_debug("[server] Sending CHAP_I=%d\n", chap->id); + /* + * Generate Challenge. 
+ */ + if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) { + chap_close(conn); + return NULL; + } + + return chap; +} + +static const char base64_lookup_table[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +static int chap_base64_decode(u8 *dst, const char *src, size_t len) +{ + int i, bits = 0, ac = 0; + const char *p; + u8 *cp = dst; + + for (i = 0; i < len; i++) { + if (src[i] == '=') + return cp - dst; + + p = strchr(base64_lookup_table, src[i]); + if (p == NULL || src[i] == 0) + return -2; + + ac <<= 6; + ac += (p - base64_lookup_table); + bits += 6; + if (bits >= 8) { + *cp++ = (ac >> (bits - 8)) & 0xff; + ac &= ~(BIT(16) - BIT(bits - 8)); + bits -= 8; + } + } + if (ac) + return -1; + + return cp - dst; +} + +static int chap_server_compute_hash( + struct iscsit_conn *conn, + struct iscsi_node_auth *auth, + char *nr_in_ptr, + char *nr_out_ptr, + unsigned int *nr_out_len) +{ + unsigned long id; + unsigned char id_as_uchar; + unsigned char type; + unsigned char identifier[10], *initiatorchg = NULL; + unsigned char *initiatorchg_binhex = NULL; + unsigned char *digest = NULL; + unsigned char *response = NULL; + unsigned char *client_digest = NULL; + unsigned char *server_digest = NULL; + unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; + size_t compare_len; + struct iscsi_chap *chap = conn->auth_protocol; + struct crypto_shash *tfm = NULL; + struct shash_desc *desc = NULL; + int auth_ret = -1, ret, initiatorchg_len; + + digest = kzalloc(chap->digest_size, GFP_KERNEL); + if (!digest) { + pr_err("Unable to allocate the digest buffer\n"); + goto out; + } + + response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL); + if (!response) { + pr_err("Unable to allocate the response buffer\n"); + goto out; + } + + client_digest = kzalloc(chap->digest_size, GFP_KERNEL); + if (!client_digest) { + pr_err("Unable to allocate the client_digest buffer\n"); + goto out; + } + + server_digest = kzalloc(chap->digest_size, GFP_KERNEL); + if (!server_digest) { + pr_err("Unable to allocate the server_digest buffer\n"); + goto out; + } + + memset(identifier, 0, 10); + memset(chap_n, 0, MAX_CHAP_N_SIZE); + memset(chap_r, 0, MAX_RESPONSE_LENGTH); + + initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL); + if (!initiatorchg) { + pr_err("Unable to allocate challenge buffer\n"); + goto out; + } + + initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL); + if (!initiatorchg_binhex) { + pr_err("Unable to allocate initiatorchg_binhex buffer\n"); + goto out; + } + /* + * Extract CHAP_N. + */ + if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n, + &type) < 0) { + pr_err("Could not find CHAP_N.\n"); + goto out; + } + if (type == HEX) { + pr_err("Could not find CHAP_N.\n"); + goto out; + } + + /* Include the terminating NULL in the compare */ + compare_len = strlen(auth->userid) + 1; + if (strncmp(chap_n, auth->userid, compare_len) != 0) { + pr_err("CHAP_N values do not match!\n"); + goto out; + } + pr_debug("[server] Got CHAP_N=%s\n", chap_n); + /* + * Extract CHAP_R. 
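+ * (extract_param() reports the value's encoding; by the usual LIO key + * convention a "0x..." value arrives as HEX and a "0b..." value as + * BASE64 - that convention is assumed here).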
+ */ + if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r, + &type) < 0) { + pr_err("Could not find CHAP_R.\n"); + goto out; + } + + switch (type) { + case HEX: + if (strlen(chap_r) != chap->digest_size * 2) { + pr_err("Malformed CHAP_R\n"); + goto out; + } + if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) { + pr_err("Malformed CHAP_R: invalid HEX\n"); + goto out; + } + break; + case BASE64: + if (chap_base64_decode(client_digest, chap_r, strlen(chap_r)) != + chap->digest_size) { + pr_err("Malformed CHAP_R: invalid BASE64\n"); + goto out; + } + break; + default: + pr_err("Could not find CHAP_R\n"); + goto out; + } + + pr_debug("[server] Got CHAP_R=%s\n", chap_r); + + tfm = crypto_alloc_shash(chap->digest_name, 0, 0); + if (IS_ERR(tfm)) { + tfm = NULL; + pr_err("Unable to allocate struct crypto_shash\n"); + goto out; + } + + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + pr_err("Unable to allocate struct shash_desc\n"); + goto out; + } + + desc->tfm = tfm; + + ret = crypto_shash_init(desc); + if (ret < 0) { + pr_err("crypto_shash_init() failed\n"); + goto out; + } + + ret = crypto_shash_update(desc, &chap->id, 1); + if (ret < 0) { + pr_err("crypto_shash_update() failed for id\n"); + goto out; + } + + ret = crypto_shash_update(desc, (char *)&auth->password, + strlen(auth->password)); + if (ret < 0) { + pr_err("crypto_shash_update() failed for password\n"); + goto out; + } + + ret = crypto_shash_finup(desc, chap->challenge, + chap->challenge_len, server_digest); + if (ret < 0) { + pr_err("crypto_shash_finup() failed for challenge\n"); + goto out; + } + + bin2hex(response, server_digest, chap->digest_size); + pr_debug("[server] %s Server Digest: %s\n", + chap->digest_name, response); + + if (memcmp(server_digest, client_digest, chap->digest_size) != 0) { + pr_debug("[server] %s Digests do not match!\n\n", + chap->digest_name); + goto out; + } else + pr_debug("[server] %s Digests match, CHAP connection" + " successful.\n\n", chap->digest_name); + /* + * One-way authentication has succeeded, return now if mutual + * authentication is not enabled. + */ + if (!auth->authenticate_target) { + auth_ret = 0; + goto out; + } + /* + * Get CHAP_I. + */ + ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type); + if (ret == -ENOENT) { + pr_debug("Could not find CHAP_I. Initiator uses one-way authentication.\n"); + auth_ret = 0; + goto out; + } + if (ret < 0) { + pr_err("Could not find CHAP_I.\n"); + goto out; + } + + if (type == HEX) + ret = kstrtoul(&identifier[2], 0, &id); + else + ret = kstrtoul(identifier, 0, &id); + + if (ret < 0) { + pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret); + goto out; + } + if (id > 255) { + pr_err("chap identifier: %lu greater than 255\n", id); + goto out; + } + /* + * RFC 1994 says the Identifier is no more than one octet (8 bits). + */ + pr_debug("[server] Got CHAP_I=%lu\n", id); + /* + * Get CHAP_C. 
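+ * (the initiator's own challenge for mutual CHAP; the checks below cap + * it at 1024 decoded bytes and reject a reflected copy of the target's + * challenge).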
+ */ + if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN, + initiatorchg, &type) < 0) { + pr_err("Could not find CHAP_C.\n"); + goto out; + } + + switch (type) { + case HEX: + initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2); + if (!initiatorchg_len) { + pr_err("Unable to convert incoming challenge\n"); + goto out; + } + if (initiatorchg_len > 1024) { + pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); + goto out; + } + + if (hex2bin(initiatorchg_binhex, initiatorchg, + initiatorchg_len) < 0) { + pr_err("Malformed CHAP_C: invalid HEX\n"); + goto out; + } + break; + case BASE64: + initiatorchg_len = chap_base64_decode(initiatorchg_binhex, + initiatorchg, + strlen(initiatorchg)); + if (initiatorchg_len < 0) { + pr_err("Malformed CHAP_C: invalid BASE64\n"); + goto out; + } + if (!initiatorchg_len) { + pr_err("Unable to convert incoming challenge\n"); + goto out; + } + if (initiatorchg_len > 1024) { + pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); + goto out; + } + break; + default: + pr_err("Could not find CHAP_C.\n"); + goto out; + } + + pr_debug("[server] Got CHAP_C=%s\n", initiatorchg); + /* + * During mutual authentication, the CHAP_C generated by the + * initiator must not match the original CHAP_C generated by + * the target. + */ + if (initiatorchg_len == chap->challenge_len && + !memcmp(initiatorchg_binhex, chap->challenge, + initiatorchg_len)) { + pr_err("initiator CHAP_C matches target CHAP_C, failing" + " login attempt\n"); + goto out; + } + /* + * Generate CHAP_N and CHAP_R for mutual authentication. + */ + ret = crypto_shash_init(desc); + if (ret < 0) { + pr_err("crypto_shash_init() failed\n"); + goto out; + } + + /* To handle both endiannesses */ + id_as_uchar = id; + ret = crypto_shash_update(desc, &id_as_uchar, 1); + if (ret < 0) { + pr_err("crypto_shash_update() failed for id\n"); + goto out; + } + + ret = crypto_shash_update(desc, auth->password_mutual, + strlen(auth->password_mutual)); + if (ret < 0) { + pr_err("crypto_shash_update() failed for" + " password_mutual\n"); + goto out; + } + /* + * Digest the initiator's challenge, which was converted to binary + * above. + */ + ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len, + digest); + if (ret < 0) { + pr_err("crypto_shash_finup() failed for ma challenge\n"); + goto out; + } + + /* + * Generate CHAP_N and CHAP_R. + */ + *nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual); + *nr_out_len += 1; + pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual); + /* + * Convert the response from binary to ASCII hex. 
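+ * (e.g. a 16-byte MD5 digest becomes 32 hex digits, so the initiator + * sees "CHAP_R=0x<32 hex digits>"; larger digests scale accordingly).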
+ */ + bin2hex(response, digest, chap->digest_size); + *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", + response); + *nr_out_len += 1; + pr_debug("[server] Sending CHAP_R=0x%s\n", response); + auth_ret = 0; +out: + kfree_sensitive(desc); + if (tfm) + crypto_free_shash(tfm); + kfree(initiatorchg); + kfree(initiatorchg_binhex); + kfree(digest); + kfree(response); + kfree(server_digest); + kfree(client_digest); + return auth_ret; +} + +u32 chap_main_loop( + struct iscsit_conn *conn, + struct iscsi_node_auth *auth, + char *in_text, + char *out_text, + int *in_len, + int *out_len) +{ + struct iscsi_chap *chap = conn->auth_protocol; + + if (!chap) { + chap = chap_server_open(conn, auth, in_text, out_text, out_len); + if (!chap) + return 2; + chap->chap_state = CHAP_STAGE_SERVER_AIC; + return 0; + } else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) { + convert_null_to_semi(in_text, *in_len); + if (chap_server_compute_hash(conn, auth, in_text, out_text, + out_len) < 0) { + chap_close(conn); + return 2; + } + if (auth->authenticate_target) + chap->chap_state = CHAP_STAGE_SERVER_NR; + else + *out_len = 0; + chap_close(conn); + return 1; + } + + return 2; +} diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h new file mode 100644 index 0000000000..ceb9b77547 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_auth.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ISCSI_CHAP_H_ +#define _ISCSI_CHAP_H_ + +#include <linux/types.h> + +#define CHAP_DIGEST_UNKNOWN 0 +#define CHAP_DIGEST_MD5 5 +#define CHAP_DIGEST_SHA1 6 +#define CHAP_DIGEST_SHA256 7 +#define CHAP_DIGEST_SHA3_256 8 + +#define MAX_CHAP_CHALLENGE_LEN 32 +#define CHAP_CHALLENGE_STR_LEN 4096 +#define MAX_RESPONSE_LENGTH 128 /* sufficient for SHA3 256 */ +#define MAX_CHAP_N_SIZE 512 + +#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */ +#define SHA1_SIGNATURE_SIZE 20 /* 20 bytes in a SHA1 message digest */ +#define SHA256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA256 message digest */ +#define SHA3_256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA3 256 message digest */ + +#define CHAP_STAGE_CLIENT_A 1 +#define CHAP_STAGE_SERVER_AIC 2 +#define CHAP_STAGE_CLIENT_NR 3 +#define CHAP_STAGE_CLIENT_NRIC 4 +#define CHAP_STAGE_SERVER_NR 5 + +struct iscsi_node_auth; +struct iscsit_conn; + +extern u32 chap_main_loop(struct iscsit_conn *, struct iscsi_node_auth *, char *, char *, + int *, int *); + +struct iscsi_chap { + unsigned char id; + unsigned char challenge[MAX_CHAP_CHALLENGE_LEN]; + unsigned int challenge_len; + unsigned char *digest_name; + unsigned int digest_size; + unsigned int authenticate_target; + unsigned int chap_state; +} ____cacheline_aligned; + +#endif /*** _ISCSI_CHAP_H_ ***/ diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c new file mode 100644 index 0000000000..1cff6052e8 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -0,0 +1,1593 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains the configfs implementation for iSCSI Target mode + * from the LIO-Target Project. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> + * + ****************************************************************************/ + +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/export.h> +#include <linux/inet.h> +#include <linux/module.h> +#include <net/ipv6.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_parameters.h" +#include "iscsi_target_device.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_nodeattrib.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" +#include <target/iscsi/iscsi_target_stat.h> + + +/* Start items for lio_target_portal_cit */ + +static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item) +{ + return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np); +} + +static ssize_t lio_target_np_driver_show(struct config_item *item, char *page, + enum iscsit_transport_type type) +{ + struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); + struct iscsi_tpg_np *tpg_np_new; + ssize_t rb; + + tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); + if (tpg_np_new) + rb = sysfs_emit(page, "1\n"); + else + rb = sysfs_emit(page, "0\n"); + + return rb; +} + +static ssize_t lio_target_np_driver_store(struct config_item *item, + const char *page, size_t count, enum iscsit_transport_type type, + const char *mod_name) +{ + struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); + struct iscsi_np *np; + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np_new = NULL; + u32 op; + int rc; + + rc = kstrtou32(page, 0, &op); + if (rc) + return rc; + if ((op != 1) && (op != 0)) { + pr_err("Illegal value for tpg_enable: %u\n", op); + return -EINVAL; + } + np = tpg_np->tpg_np; + if (!np) { + pr_err("Unable to locate struct iscsi_np from" + " struct iscsi_tpg_np\n"); + return -EINVAL; + } + + tpg = tpg_np->tpg; + if (iscsit_get_tpg(tpg) < 0) + return -EINVAL; + + if (op) { + if (strlen(mod_name)) { + rc = request_module(mod_name); + if (rc != 0) { + pr_warn("Unable to request_module for %s\n", + mod_name); + rc = 0; + } + } + + tpg_np_new = iscsit_tpg_add_network_portal(tpg, + &np->np_sockaddr, tpg_np, type); + if (IS_ERR(tpg_np_new)) { + rc = PTR_ERR(tpg_np_new); + goto out; + } + } else { + tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); + if (tpg_np_new) { + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new); + if (rc < 0) + goto out; + } + } + + iscsit_put_tpg(tpg); + return count; +out: + iscsit_put_tpg(tpg); + return rc; +} + +static ssize_t lio_target_np_iser_show(struct config_item *item, char *page) +{ + return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND); +} + +static ssize_t lio_target_np_iser_store(struct config_item *item, + const char *page, size_t count) +{ + return lio_target_np_driver_store(item, page, count, + ISCSI_INFINIBAND, "ib_isert"); +} +CONFIGFS_ATTR(lio_target_np_, iser); + +static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page) +{ + return lio_target_np_driver_show(item, page, ISCSI_CXGBIT); +} + +static ssize_t lio_target_np_cxgbit_store(struct config_item *item, + const char *page, size_t count) +{ + return lio_target_np_driver_store(item, page, count, + ISCSI_CXGBIT, "cxgbit"); +} +CONFIGFS_ATTR(lio_target_np_, cxgbit); + +static struct configfs_attribute *lio_target_portal_attrs[] = { + &lio_target_np_attr_iser, + &lio_target_np_attr_cxgbit, + NULL, +}; + +/* Stop items for 
lio_target_portal_cit */ + +/* Start items for lio_target_np_cit */ + +#define MAX_PORTAL_LEN 256 + +static struct se_tpg_np *lio_target_call_addnptotpg( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np; + char *str, *str2, *ip_str, *port_str; + struct sockaddr_storage sockaddr = { }; + int ret; + char buf[MAX_PORTAL_LEN + 1] = { }; + + if (strlen(name) > MAX_PORTAL_LEN) { + pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n", + (int)strlen(name), MAX_PORTAL_LEN); + return ERR_PTR(-EOVERFLOW); + } + snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name); + + str = strstr(buf, "["); + if (str) { + str2 = strstr(str, "]"); + if (!str2) { + pr_err("Unable to locate trailing \"]\"" + " in IPv6 iSCSI network portal address\n"); + return ERR_PTR(-EINVAL); + } + + ip_str = str + 1; /* Skip over leading "[" */ + *str2 = '\0'; /* Terminate the unbracketed IPv6 address */ + str2++; /* Skip over the \0 */ + + port_str = strstr(str2, ":"); + if (!port_str) { + pr_err("Unable to locate \":port\"" + " in IPv6 iSCSI network portal address\n"); + return ERR_PTR(-EINVAL); + } + *port_str = '\0'; /* Terminate string for IP */ + port_str++; /* Skip over ":" */ + } else { + ip_str = &buf[0]; + port_str = strstr(ip_str, ":"); + if (!port_str) { + pr_err("Unable to locate \":port\"" + " in IPv4 iSCSI network portal address\n"); + return ERR_PTR(-EINVAL); + } + *port_str = '\0'; /* Terminate string for IP */ + port_str++; /* Skip over ":" */ + } + + ret = inet_pton_with_scope(&init_net, AF_UNSPEC, ip_str, + port_str, &sockaddr); + if (ret) { + pr_err("malformed ip/port passed: %s\n", name); + return ERR_PTR(ret); + } + + tpg = to_iscsi_tpg(se_tpg); + ret = iscsit_get_tpg(tpg); + if (ret < 0) + return ERR_PTR(-EINVAL); + + pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu" + " PORTAL: %s\n", + config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item), + tpg->tpgt, name); + /* + * Assume ISCSI_TCP by default. 
Other network portals for other + * iSCSI fabrics: + * + * Traditional iSCSI over SCTP (initial support) + * iSER/TCP (TODO, hardware available) + * iSER/SCTP (TODO, software emulation with osc-iwarp) + * iSER/IB (TODO, hardware available) + * + * can be enabled with attributes under + * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/ + * + */ + tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL, + ISCSI_TCP); + if (IS_ERR(tpg_np)) { + iscsit_put_tpg(tpg); + return ERR_CAST(tpg_np); + } + pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); + + iscsit_put_tpg(tpg); + return &tpg_np->se_tpg_np; +} + +static void lio_target_call_delnpfromtpg( + struct se_tpg_np *se_tpg_np) +{ + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np; + struct se_portal_group *se_tpg; + int ret; + + tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np); + tpg = tpg_np->tpg; + ret = iscsit_get_tpg(tpg); + if (ret < 0) + return; + + se_tpg = &tpg->tpg_se_tpg; + pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu" + " PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item), + tpg->tpgt, &tpg_np->tpg_np->np_sockaddr); + + ret = iscsit_tpg_del_network_portal(tpg, tpg_np); + if (ret < 0) + goto out; + + pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n"); +out: + iscsit_put_tpg(tpg); +} + +/* End items for lio_target_np_cit */ + +/* Start items for lio_target_nacl_attrib_cit */ + +#define ISCSI_NACL_ATTR(name) \ +static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\ + char *page) \ +{ \ + struct se_node_acl *se_nacl = attrib_to_nacl(item); \ + struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \ + return sysfs_emit(page, "%u\n", nacl->node_attrib.name); \ +} \ + \ +static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\ + const char *page, size_t count) \ +{ \ + struct se_node_acl *se_nacl = attrib_to_nacl(item); \ + struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \ + u32 val; \ + int ret; \ + \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + return ret; \ + ret = iscsit_na_##name(nacl, val); \ + if (ret < 0) \ + return ret; \ + \ + return count; \ +} \ + \ +CONFIGFS_ATTR(iscsi_nacl_attrib_, name) + +ISCSI_NACL_ATTR(dataout_timeout); +ISCSI_NACL_ATTR(dataout_timeout_retries); +ISCSI_NACL_ATTR(default_erl); +ISCSI_NACL_ATTR(nopin_timeout); +ISCSI_NACL_ATTR(nopin_response_timeout); +ISCSI_NACL_ATTR(random_datain_pdu_offsets); +ISCSI_NACL_ATTR(random_datain_seq_offsets); +ISCSI_NACL_ATTR(random_r2t_offsets); + +static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item, + char *page) +{ + struct se_node_acl *se_nacl = attrib_to_nacl(item); + struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); + + return sysfs_emit(page, "%d\n", nacl->node_attrib.authentication); +} + +static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_node_acl *se_nacl = attrib_to_nacl(item); + struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); + s32 val; + int ret; + + ret = kstrtos32(page, 0, &val); + if (ret) + return ret; + if (val != 0 && val != 1 && val != NA_AUTHENTICATION_INHERITED) + return -EINVAL; + + nacl->node_attrib.authentication = val; + + return count; +} + +CONFIGFS_ATTR(iscsi_nacl_attrib_, authentication); + +static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = { + &iscsi_nacl_attrib_attr_dataout_timeout, + &iscsi_nacl_attrib_attr_dataout_timeout_retries, + &iscsi_nacl_attrib_attr_default_erl, + 
&iscsi_nacl_attrib_attr_nopin_timeout, + &iscsi_nacl_attrib_attr_nopin_response_timeout, + &iscsi_nacl_attrib_attr_random_datain_pdu_offsets, + &iscsi_nacl_attrib_attr_random_datain_seq_offsets, + &iscsi_nacl_attrib_attr_random_r2t_offsets, + &iscsi_nacl_attrib_attr_authentication, + NULL, +}; + +/* End items for lio_target_nacl_attrib_cit */ + +/* Start items for lio_target_nacl_auth_cit */ + +#define __DEF_NACL_AUTH_STR(prefix, name, flags) \ +static ssize_t __iscsi_##prefix##_##name##_show( \ + struct iscsi_node_acl *nacl, \ + char *page) \ +{ \ + struct iscsi_node_auth *auth = &nacl->node_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \ +} \ + \ +static ssize_t __iscsi_##prefix##_##name##_store( \ + struct iscsi_node_acl *nacl, \ + const char *page, \ + size_t count) \ +{ \ + struct iscsi_node_auth *auth = &nacl->node_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + if (count >= sizeof(auth->name)) \ + return -EINVAL; \ + snprintf(auth->name, sizeof(auth->name), "%s", page); \ + if (!strncmp("NULL", auth->name, 4)) \ + auth->naf_flags &= ~flags; \ + else \ + auth->naf_flags |= flags; \ + \ + if ((auth->naf_flags & NAF_USERID_IN_SET) && \ + (auth->naf_flags & NAF_PASSWORD_IN_SET)) \ + auth->authenticate_target = 1; \ + else \ + auth->authenticate_target = 0; \ + \ + return count; \ +} + +#define DEF_NACL_AUTH_STR(name, flags) \ + __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \ +static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct se_node_acl *nacl = auth_to_nacl(item); \ + return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \ +} \ +static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item, \ + const char *page, size_t count) \ +{ \ + struct se_node_acl *nacl = auth_to_nacl(item); \ + return __iscsi_nacl_auth_##name##_store(to_iscsi_nacl(nacl), \ + page, count); \ +} \ + \ +CONFIGFS_ATTR(iscsi_nacl_auth_, name) + +/* + * One-way authentication userid + */ +DEF_NACL_AUTH_STR(userid, NAF_USERID_SET); +DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET); +DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET); +DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET); + +#define __DEF_NACL_AUTH_INT(prefix, name) \ +static ssize_t __iscsi_##prefix##_##name##_show( \ + struct iscsi_node_acl *nacl, \ + char *page) \ +{ \ + struct iscsi_node_auth *auth = &nacl->node_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \ +} + +#define DEF_NACL_AUTH_INT(name) \ + __DEF_NACL_AUTH_INT(nacl_auth, name) \ +static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct se_node_acl *nacl = auth_to_nacl(item); \ + return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \ +} \ + \ +CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name) + +DEF_NACL_AUTH_INT(authenticate_target); + +static struct configfs_attribute *lio_target_nacl_auth_attrs[] = { + &iscsi_nacl_auth_attr_userid, + &iscsi_nacl_auth_attr_password, + &iscsi_nacl_auth_attr_authenticate_target, + &iscsi_nacl_auth_attr_userid_mutual, + &iscsi_nacl_auth_attr_password_mutual, + NULL, +}; + +/* End items for lio_target_nacl_auth_cit */ + +/* Start items for lio_target_nacl_param_cit */ + +#define ISCSI_NACL_PARAM(name) \ +static ssize_t iscsi_nacl_param_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct se_node_acl *se_nacl = param_to_nacl(item); \ + struct 
iscsit_session *sess; \ + struct se_session *se_sess; \ + ssize_t rb; \ + \ + spin_lock_bh(&se_nacl->nacl_sess_lock); \ + se_sess = se_nacl->nacl_sess; \ + if (!se_sess) { \ + rb = snprintf(page, PAGE_SIZE, \ + "No Active iSCSI Session\n"); \ + } else { \ + sess = se_sess->fabric_sess_ptr; \ + rb = snprintf(page, PAGE_SIZE, "%u\n", \ + (u32)sess->sess_ops->name); \ + } \ + spin_unlock_bh(&se_nacl->nacl_sess_lock); \ + \ + return rb; \ +} \ + \ +CONFIGFS_ATTR_RO(iscsi_nacl_param_, name) + +ISCSI_NACL_PARAM(MaxConnections); +ISCSI_NACL_PARAM(InitialR2T); +ISCSI_NACL_PARAM(ImmediateData); +ISCSI_NACL_PARAM(MaxBurstLength); +ISCSI_NACL_PARAM(FirstBurstLength); +ISCSI_NACL_PARAM(DefaultTime2Wait); +ISCSI_NACL_PARAM(DefaultTime2Retain); +ISCSI_NACL_PARAM(MaxOutstandingR2T); +ISCSI_NACL_PARAM(DataPDUInOrder); +ISCSI_NACL_PARAM(DataSequenceInOrder); +ISCSI_NACL_PARAM(ErrorRecoveryLevel); + +static struct configfs_attribute *lio_target_nacl_param_attrs[] = { + &iscsi_nacl_param_attr_MaxConnections, + &iscsi_nacl_param_attr_InitialR2T, + &iscsi_nacl_param_attr_ImmediateData, + &iscsi_nacl_param_attr_MaxBurstLength, + &iscsi_nacl_param_attr_FirstBurstLength, + &iscsi_nacl_param_attr_DefaultTime2Wait, + &iscsi_nacl_param_attr_DefaultTime2Retain, + &iscsi_nacl_param_attr_MaxOutstandingR2T, + &iscsi_nacl_param_attr_DataPDUInOrder, + &iscsi_nacl_param_attr_DataSequenceInOrder, + &iscsi_nacl_param_attr_ErrorRecoveryLevel, + NULL, +}; + +/* End items for lio_target_nacl_param_cit */ + +/* Start items for lio_target_acl_cit */ + +static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + struct iscsit_session *sess; + struct iscsit_conn *conn; + struct se_session *se_sess; + ssize_t rb = 0; + u32 max_cmd_sn; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (!se_sess) { + rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator" + " Endpoint: %s\n", se_nacl->initiatorname); + } else { + sess = se_sess->fabric_sess_ptr; + + rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n", + sess->sess_ops->InitiatorName); + rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n", + sess->sess_ops->InitiatorAlias); + + rb += sysfs_emit_at(page, rb, + "LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ", + sess->sid, sess->isid, sess->tsih); + rb += sysfs_emit_at(page, rb, "SessionType: %s\n", + (sess->sess_ops->SessionType) ? 
+ "Discovery" : "Normal"); + rb += sysfs_emit_at(page, rb, "Session State: "); + switch (sess->session_state) { + case TARG_SESS_STATE_FREE: + rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n"); + break; + case TARG_SESS_STATE_ACTIVE: + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n"); + break; + case TARG_SESS_STATE_LOGGED_IN: + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n"); + break; + case TARG_SESS_STATE_FAILED: + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n"); + break; + case TARG_SESS_STATE_IN_CONTINUE: + rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n"); + break; + default: + rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session" + " State!\n"); + break; + } + + rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session" + " Values]-----------------------\n"); + rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" + " : MaxCmdSN : ITT : TTT\n"); + max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn); + rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x" + " 0x%08x 0x%08x\n", + sess->cmdsn_window, + (max_cmd_sn - sess->exp_cmd_sn) + 1, + sess->exp_cmd_sn, max_cmd_sn, + sess->init_task_tag, sess->targ_xfer_tag); + rb += sysfs_emit_at(page, rb, "----------------------[iSCSI" + " Connections]-------------------------\n"); + + spin_lock(&sess->conn_lock); + list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { + rb += sysfs_emit_at(page, rb, "CID: %hu Connection" + " State: ", conn->cid); + switch (conn->conn_state) { + case TARG_CONN_STATE_FREE: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_FREE\n"); + break; + case TARG_CONN_STATE_XPT_UP: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_XPT_UP\n"); + break; + case TARG_CONN_STATE_IN_LOGIN: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_IN_LOGIN\n"); + break; + case TARG_CONN_STATE_LOGGED_IN: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_LOGGED_IN\n"); + break; + case TARG_CONN_STATE_IN_LOGOUT: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_IN_LOGOUT\n"); + break; + case TARG_CONN_STATE_LOGOUT_REQUESTED: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_LOGOUT_REQUESTED\n"); + break; + case TARG_CONN_STATE_CLEANUP_WAIT: + rb += sysfs_emit_at(page, rb, + "TARG_CONN_STATE_CLEANUP_WAIT\n"); + break; + default: + rb += sysfs_emit_at(page, rb, + "ERROR: Unknown Connection State!\n"); + break; + } + + rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr, + (conn->network_transport == ISCSI_TCP) ? 
+ "TCP" : "SCTP"); + rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n", + conn->stat_sn); + } + spin_unlock(&sess->conn_lock); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return rb; +} + +static ssize_t lio_target_nacl_cmdsn_depth_show(struct config_item *item, + char *page) +{ + return sysfs_emit(page, "%u\n", acl_to_nacl(item)->queue_depth); +} + +static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); + struct config_item *acl_ci, *tpg_ci, *wwn_ci; + u32 cmdsn_depth = 0; + int ret; + + ret = kstrtou32(page, 0, &cmdsn_depth); + if (ret) + return ret; + if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) { + pr_err("Passed cmdsn_depth: %u exceeds" + " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth, + TA_DEFAULT_CMDSN_DEPTH_MAX); + return -EINVAL; + } + acl_ci = &se_nacl->acl_group.cg_item; + if (!acl_ci) { + pr_err("Unable to locatel acl_ci\n"); + return -EINVAL; + } + tpg_ci = &acl_ci->ci_parent->ci_group->cg_item; + if (!tpg_ci) { + pr_err("Unable to locate tpg_ci\n"); + return -EINVAL; + } + wwn_ci = &tpg_ci->ci_group->cg_item; + if (!wwn_ci) { + pr_err("Unable to locate config_item wwn_ci\n"); + return -EINVAL; + } + + if (iscsit_get_tpg(tpg) < 0) + return -EINVAL; + + ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth); + + pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for" + "InitiatorName: %s\n", config_item_name(wwn_ci), + config_item_name(tpg_ci), cmdsn_depth, + config_item_name(acl_ci)); + + iscsit_put_tpg(tpg); + return (!ret) ? count : (ssize_t)ret; +} + +static ssize_t lio_target_nacl_tag_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag); +} + +static ssize_t lio_target_nacl_tag_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + int ret; + + ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page); + + if (ret < 0) + return ret; + return count; +} + +CONFIGFS_ATTR_RO(lio_target_nacl_, info); +CONFIGFS_ATTR(lio_target_nacl_, cmdsn_depth); +CONFIGFS_ATTR(lio_target_nacl_, tag); + +static struct configfs_attribute *lio_target_initiator_attrs[] = { + &lio_target_nacl_attr_info, + &lio_target_nacl_attr_cmdsn_depth, + &lio_target_nacl_attr_tag, + NULL, +}; + +static int lio_target_init_nodeacl(struct se_node_acl *se_nacl, + const char *name) +{ + struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl); + + config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group, + "iscsi_sess_stats", &iscsi_stat_sess_cit); + configfs_add_default_group(&acl->node_stat_grps.iscsi_sess_stats_group, + &se_nacl->acl_fabric_stat_group); + return 0; +} + +/* End items for lio_target_acl_cit */ + +/* Start items for lio_target_tpg_attrib_cit */ + +#define DEF_TPG_ATTRIB(name) \ + \ +static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct se_portal_group *se_tpg = attrib_to_tpg(item); \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + ssize_t rb; \ + \ + if (iscsit_get_tpg(tpg) < 0) \ + return -EINVAL; \ + \ + rb = sysfs_emit(page, "%u\n", tpg->tpg_attrib.name); \ + iscsit_put_tpg(tpg); \ + return rb; \ +} \ + \ +static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\ + const char *page, size_t count) \ +{ \ + struct 
se_portal_group *se_tpg = attrib_to_tpg(item); \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + u32 val; \ + int ret; \ + \ + if (iscsit_get_tpg(tpg) < 0) \ + return -EINVAL; \ + \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + goto out; \ + ret = iscsit_ta_##name(tpg, val); \ + if (ret < 0) \ + goto out; \ + \ + iscsit_put_tpg(tpg); \ + return count; \ +out: \ + iscsit_put_tpg(tpg); \ + return ret; \ +} \ +CONFIGFS_ATTR(iscsi_tpg_attrib_, name) + +DEF_TPG_ATTRIB(authentication); +DEF_TPG_ATTRIB(login_timeout); +DEF_TPG_ATTRIB(generate_node_acls); +DEF_TPG_ATTRIB(default_cmdsn_depth); +DEF_TPG_ATTRIB(cache_dynamic_acls); +DEF_TPG_ATTRIB(demo_mode_write_protect); +DEF_TPG_ATTRIB(prod_mode_write_protect); +DEF_TPG_ATTRIB(demo_mode_discovery); +DEF_TPG_ATTRIB(default_erl); +DEF_TPG_ATTRIB(t10_pi); +DEF_TPG_ATTRIB(fabric_prot_type); +DEF_TPG_ATTRIB(tpg_enabled_sendtargets); +DEF_TPG_ATTRIB(login_keys_workaround); + +static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { + &iscsi_tpg_attrib_attr_authentication, + &iscsi_tpg_attrib_attr_login_timeout, + &iscsi_tpg_attrib_attr_generate_node_acls, + &iscsi_tpg_attrib_attr_default_cmdsn_depth, + &iscsi_tpg_attrib_attr_cache_dynamic_acls, + &iscsi_tpg_attrib_attr_demo_mode_write_protect, + &iscsi_tpg_attrib_attr_prod_mode_write_protect, + &iscsi_tpg_attrib_attr_demo_mode_discovery, + &iscsi_tpg_attrib_attr_default_erl, + &iscsi_tpg_attrib_attr_t10_pi, + &iscsi_tpg_attrib_attr_fabric_prot_type, + &iscsi_tpg_attrib_attr_tpg_enabled_sendtargets, + &iscsi_tpg_attrib_attr_login_keys_workaround, + NULL, +}; + +/* End items for lio_target_tpg_attrib_cit */ + +/* Start items for lio_target_tpg_auth_cit */ + +#define __DEF_TPG_AUTH_STR(prefix, name, flags) \ +static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \ +} \ + \ +static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\ + const char *page, size_t count) \ +{ \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + snprintf(auth->name, sizeof(auth->name), "%s", page); \ + if (!(strncmp("NULL", auth->name, 4))) \ + auth->naf_flags &= ~flags; \ + else \ + auth->naf_flags |= flags; \ + \ + if ((auth->naf_flags & NAF_USERID_IN_SET) && \ + (auth->naf_flags & NAF_PASSWORD_IN_SET)) \ + auth->authenticate_target = 1; \ + else \ + auth->authenticate_target = 0; \ + \ + return count; \ +} + +#define DEF_TPG_AUTH_STR(name, flags) \ + __DEF_TPG_AUTH_STR(tpg_auth, name, flags) \ +static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page); \ +} \ + \ +static ssize_t iscsi_tpg_auth_##name##_store(struct config_item *item, \ + const char *page, size_t count) \ +{ \ + return __iscsi_tpg_auth_##name##_store(auth_to_tpg(item), page, count); \ +} \ + \ +CONFIGFS_ATTR(iscsi_tpg_auth_, name); + + +DEF_TPG_AUTH_STR(userid, NAF_USERID_SET); +DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET); +DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET); +DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET); + +#define __DEF_TPG_AUTH_INT(prefix, name) \ +static ssize_t 
__iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \ + char *page) \ +{ \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \ +} + +#define DEF_TPG_AUTH_INT(name) \ + __DEF_TPG_AUTH_INT(tpg_auth, name) \ +static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page); \ +} \ +CONFIGFS_ATTR_RO(iscsi_tpg_auth_, name); + +DEF_TPG_AUTH_INT(authenticate_target); + +static struct configfs_attribute *lio_target_tpg_auth_attrs[] = { + &iscsi_tpg_auth_attr_userid, + &iscsi_tpg_auth_attr_password, + &iscsi_tpg_auth_attr_authenticate_target, + &iscsi_tpg_auth_attr_userid_mutual, + &iscsi_tpg_auth_attr_password_mutual, + NULL, +}; + +/* End items for lio_target_tpg_auth_cit */ + +/* Start items for lio_target_tpg_param_cit */ + +#define DEF_TPG_PARAM(name) \ +static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct se_portal_group *se_tpg = param_to_tpg(item); \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + struct iscsi_param *param; \ + ssize_t rb; \ + \ + if (iscsit_get_tpg(tpg) < 0) \ + return -EINVAL; \ + \ + param = iscsi_find_param_from_key(__stringify(name), \ + tpg->param_list); \ + if (!param) { \ + iscsit_put_tpg(tpg); \ + return -EINVAL; \ + } \ + rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \ + \ + iscsit_put_tpg(tpg); \ + return rb; \ +} \ +static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \ + const char *page, size_t count) \ +{ \ + struct se_portal_group *se_tpg = param_to_tpg(item); \ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \ + char *buf; \ + int ret, len; \ + \ + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \ + if (!buf) \ + return -ENOMEM; \ + len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \ + if (isspace(buf[len-1])) \ + buf[len-1] = '\0'; /* Kill newline */ \ + \ + if (iscsit_get_tpg(tpg) < 0) { \ + kfree(buf); \ + return -EINVAL; \ + } \ + \ + ret = iscsi_change_param_value(buf, tpg->param_list, 1); \ + if (ret < 0) \ + goto out; \ + \ + kfree(buf); \ + iscsit_put_tpg(tpg); \ + return count; \ +out: \ + kfree(buf); \ + iscsit_put_tpg(tpg); \ + return -EINVAL; \ +} \ +CONFIGFS_ATTR(iscsi_tpg_param_, name) + +DEF_TPG_PARAM(AuthMethod); +DEF_TPG_PARAM(HeaderDigest); +DEF_TPG_PARAM(DataDigest); +DEF_TPG_PARAM(MaxConnections); +DEF_TPG_PARAM(TargetAlias); +DEF_TPG_PARAM(InitialR2T); +DEF_TPG_PARAM(ImmediateData); +DEF_TPG_PARAM(MaxRecvDataSegmentLength); +DEF_TPG_PARAM(MaxXmitDataSegmentLength); +DEF_TPG_PARAM(MaxBurstLength); +DEF_TPG_PARAM(FirstBurstLength); +DEF_TPG_PARAM(DefaultTime2Wait); +DEF_TPG_PARAM(DefaultTime2Retain); +DEF_TPG_PARAM(MaxOutstandingR2T); +DEF_TPG_PARAM(DataPDUInOrder); +DEF_TPG_PARAM(DataSequenceInOrder); +DEF_TPG_PARAM(ErrorRecoveryLevel); +DEF_TPG_PARAM(IFMarker); +DEF_TPG_PARAM(OFMarker); +DEF_TPG_PARAM(IFMarkInt); +DEF_TPG_PARAM(OFMarkInt); + +static struct configfs_attribute *lio_target_tpg_param_attrs[] = { + &iscsi_tpg_param_attr_AuthMethod, + &iscsi_tpg_param_attr_HeaderDigest, + &iscsi_tpg_param_attr_DataDigest, + &iscsi_tpg_param_attr_MaxConnections, + &iscsi_tpg_param_attr_TargetAlias, + &iscsi_tpg_param_attr_InitialR2T, + &iscsi_tpg_param_attr_ImmediateData, + &iscsi_tpg_param_attr_MaxRecvDataSegmentLength, + 
&iscsi_tpg_param_attr_MaxXmitDataSegmentLength, + &iscsi_tpg_param_attr_MaxBurstLength, + &iscsi_tpg_param_attr_FirstBurstLength, + &iscsi_tpg_param_attr_DefaultTime2Wait, + &iscsi_tpg_param_attr_DefaultTime2Retain, + &iscsi_tpg_param_attr_MaxOutstandingR2T, + &iscsi_tpg_param_attr_DataPDUInOrder, + &iscsi_tpg_param_attr_DataSequenceInOrder, + &iscsi_tpg_param_attr_ErrorRecoveryLevel, + &iscsi_tpg_param_attr_IFMarker, + &iscsi_tpg_param_attr_OFMarker, + &iscsi_tpg_param_attr_IFMarkInt, + &iscsi_tpg_param_attr_OFMarkInt, + NULL, +}; + +/* End items for lio_target_tpg_param_cit */ + +/* Start items for lio_target_tpg_cit */ + +static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item, + char *page) +{ + return target_show_dynamic_sessions(to_tpg(item), page); +} + +CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions); + +static struct configfs_attribute *lio_target_tpg_attrs[] = { + &lio_target_tpg_attr_dynamic_sessions, + NULL, +}; + +/* End items for lio_target_tpg_cit */ + +/* Start items for lio_target_tiqn_cit */ + +static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn, + const char *name) +{ + struct iscsi_portal_group *tpg; + struct iscsi_tiqn *tiqn; + char *tpgt_str; + int ret; + u16 tpgt; + + tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); + /* + * Only tpgt_# directory groups can be created below + * target/iscsi/iqn.superturodiskarry/ + */ + tpgt_str = strstr(name, "tpgt_"); + if (!tpgt_str) { + pr_err("Unable to locate \"tpgt_#\" directory" + " group\n"); + return NULL; + } + tpgt_str += 5; /* Skip ahead of "tpgt_" */ + ret = kstrtou16(tpgt_str, 0, &tpgt); + if (ret) + return NULL; + + tpg = iscsit_alloc_portal_group(tiqn, tpgt); + if (!tpg) + return NULL; + + ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI); + if (ret < 0) + goto free_out; + + ret = iscsit_tpg_add_portal_group(tiqn, tpg); + if (ret != 0) + goto out; + + pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); + pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n", + name); + return &tpg->tpg_se_tpg; +out: + core_tpg_deregister(&tpg->tpg_se_tpg); +free_out: + kfree(tpg); + return NULL; +} + +static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg, + bool enable) +{ + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); + int ret; + + ret = iscsit_get_tpg(tpg); + if (ret < 0) + return -EINVAL; + + if (enable) { + ret = iscsit_tpg_enable_portal_group(tpg); + if (ret < 0) + goto out; + } else { + /* + * iscsit_tpg_disable_portal_group() assumes force=1 + */ + ret = iscsit_tpg_disable_portal_group(tpg, 1); + if (ret < 0) + goto out; + } + + iscsit_put_tpg(tpg); + return 0; +out: + iscsit_put_tpg(tpg); + return -EINVAL; +} + +static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg) +{ + struct iscsi_portal_group *tpg; + struct iscsi_tiqn *tiqn; + + tpg = to_iscsi_tpg(se_tpg); + tiqn = tpg->tpg_tiqn; + /* + * iscsit_tpg_del_portal_group() assumes force=1 + */ + pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n"); + iscsit_tpg_del_portal_group(tiqn, tpg, 1); +} + +/* End items for lio_target_tiqn_cit */ + +/* Start LIO-Target TIQN struct contig_item lio_target_cit */ + +static ssize_t lio_target_wwn_lio_version_show(struct config_item *item, + char *page) +{ + return sysfs_emit(page, "Datera Inc. 
iSCSI Target %s\n", ISCSIT_VERSION); +} + +CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version); + +static ssize_t lio_target_wwn_cpus_allowed_list_show( + struct config_item *item, char *page) +{ + return sysfs_emit(page, "%*pbl\n", + cpumask_pr_args(iscsit_global->allowed_cpumask)); +} + +static ssize_t lio_target_wwn_cpus_allowed_list_store( + struct config_item *item, const char *page, size_t count) +{ + int ret = -ENOMEM; + char *orig; + cpumask_var_t new_allowed_cpumask; + + if (!zalloc_cpumask_var(&new_allowed_cpumask, GFP_KERNEL)) + goto out; + + orig = kstrdup(page, GFP_KERNEL); + if (!orig) + goto out_free_cpumask; + + ret = cpulist_parse(orig, new_allowed_cpumask); + if (!ret) + cpumask_copy(iscsit_global->allowed_cpumask, + new_allowed_cpumask); + + kfree(orig); +out_free_cpumask: + free_cpumask_var(new_allowed_cpumask); +out: + return ret ? ret : count; +} + +CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list); + +static struct configfs_attribute *lio_target_wwn_attrs[] = { + &lio_target_wwn_attr_lio_version, + &lio_target_wwn_attr_cpus_allowed_list, + NULL, +}; + +static struct se_wwn *lio_target_call_coreaddtiqn( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct iscsi_tiqn *tiqn; + + tiqn = iscsit_add_tiqn((unsigned char *)name); + if (IS_ERR(tiqn)) + return ERR_CAST(tiqn); + + pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); + pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:" + " %s\n", name); + return &tiqn->tiqn_wwn; +} + +static void lio_target_add_wwn_groups(struct se_wwn *wwn) +{ + struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); + + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, + "iscsi_instance", &iscsi_stat_instance_cit); + configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group, + &tiqn->tiqn_wwn.fabric_stat_group); + + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group, + "iscsi_sess_err", &iscsi_stat_sess_err_cit); + configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_sess_err_group, + &tiqn->tiqn_wwn.fabric_stat_group); + + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group, + "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit); + configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group, + &tiqn->tiqn_wwn.fabric_stat_group); + + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group, + "iscsi_login_stats", &iscsi_stat_login_cit); + configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_login_stats_group, + &tiqn->tiqn_wwn.fabric_stat_group); + + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, + "iscsi_logout_stats", &iscsi_stat_logout_cit); + configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, + &tiqn->tiqn_wwn.fabric_stat_group); +} + +static void lio_target_call_coredeltiqn( + struct se_wwn *wwn) +{ + struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); + + pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n", + tiqn->tiqn); + iscsit_del_tiqn(tiqn); +} + +/* End LIO-Target TIQN struct contig_lio_target_cit */ + +/* Start lio_target_discovery_auth_cit */ + +#define DEF_DISC_AUTH_STR(name, flags) \ + __DEF_NACL_AUTH_STR(disc, name, flags) \ +static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \ +{ \ + return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl,\ + page); \ +} \ +static ssize_t iscsi_disc_##name##_store(struct config_item *item, \ + const char *page, 
size_t count) \ +{ \ + return __iscsi_disc_##name##_store(&iscsit_global->discovery_acl, \ + page, count); \ + \ +} \ +CONFIGFS_ATTR(iscsi_disc_, name) + +DEF_DISC_AUTH_STR(userid, NAF_USERID_SET); +DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET); +DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET); +DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET); + +#define DEF_DISC_AUTH_INT(name) \ + __DEF_NACL_AUTH_INT(disc, name) \ +static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \ +{ \ + return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl, \ + page); \ +} \ +CONFIGFS_ATTR_RO(iscsi_disc_, name) + +DEF_DISC_AUTH_INT(authenticate_target); + + +static ssize_t iscsi_disc_enforce_discovery_auth_show(struct config_item *item, + char *page) +{ + struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth; + + return sysfs_emit(page, "%d\n", discovery_auth->enforce_discovery_auth); +} + +static ssize_t iscsi_disc_enforce_discovery_auth_store(struct config_item *item, + const char *page, size_t count) +{ + struct iscsi_param *param; + struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg; + u32 op; + int err; + + err = kstrtou32(page, 0, &op); + if (err) + return -EINVAL; + if ((op != 1) && (op != 0)) { + pr_err("Illegal value for enforce_discovery_auth:" + " %u\n", op); + return -EINVAL; + } + + if (!discovery_tpg) { + pr_err("iscsit_global->discovery_tpg is NULL\n"); + return -EINVAL; + } + + param = iscsi_find_param_from_key(AUTHMETHOD, + discovery_tpg->param_list); + if (!param) + return -EINVAL; + + if (op) { + /* + * Reset the AuthMethod key to CHAP. + */ + if (iscsi_update_param_value(param, CHAP) < 0) + return -EINVAL; + + discovery_tpg->tpg_attrib.authentication = 1; + iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1; + pr_debug("LIO-CORE[0] Successfully enabled" + " authentication enforcement for iSCSI" + " Discovery TPG\n"); + } else { + /* + * Reset the AuthMethod key to CHAP,None + */ + if (iscsi_update_param_value(param, "CHAP,None") < 0) + return -EINVAL; + + discovery_tpg->tpg_attrib.authentication = 0; + iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0; + pr_debug("LIO-CORE[0] Successfully disabled" + " authentication enforcement for iSCSI" + " Discovery TPG\n"); + } + + return count; +} + +CONFIGFS_ATTR(iscsi_disc_, enforce_discovery_auth); + +static struct configfs_attribute *lio_target_discovery_auth_attrs[] = { + &iscsi_disc_attr_userid, + &iscsi_disc_attr_password, + &iscsi_disc_attr_authenticate_target, + &iscsi_disc_attr_userid_mutual, + &iscsi_disc_attr_password_mutual, + &iscsi_disc_attr_enforce_discovery_auth, + NULL, +}; + +/* End lio_target_discovery_auth_cit */ + +/* Start functions for target_core_fabric_ops */ + +static int iscsi_get_cmd_state(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + + return cmd->i_state; +} + +static u32 lio_sess_get_index(struct se_session *se_sess) +{ + struct iscsit_session *sess = se_sess->fabric_sess_ptr; + + return sess->session_index; +} + +static u32 lio_sess_get_initiator_sid( + struct se_session *se_sess, + unsigned char *buf, + u32 size) +{ + struct iscsit_session *sess = se_sess->fabric_sess_ptr; + /* + * iSCSI Initiator Session Identifier from RFC-3720. 
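
For reference, the %6phN specifier used below is a kernel printk extension that renders exactly six bytes as bare hex with no separators. A standalone C equivalent of the formatting it applies to the 6-byte ISID, for illustration only (the ISID value is an example):

	#include <stdio.h>

	int main(void)
	{
		unsigned char isid[6] = { 0x80, 0x23, 0x45, 0x00, 0x00, 0x01 };	/* example ISID */
		char buf[2 * sizeof(isid) + 1];
		int n = 0;
		size_t i;

		/* Two lowercase hex digits per byte, no separators, as %6phN does. */
		for (i = 0; i < sizeof(isid); i++)
			n += snprintf(buf + n, sizeof(buf) - n, "%02x", isid[i]);
		printf("%s\n", buf);	/* prints: 802345000001 */
		return 0;
	}
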
+ */ + return snprintf(buf, size, "%6phN", sess->isid); +} + +static int lio_queue_data_in(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + struct iscsit_conn *conn = cmd->conn; + + cmd->i_state = ISTATE_SEND_DATAIN; + return conn->conn_transport->iscsit_queue_data_in(conn, cmd); +} + +static int lio_write_pending(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + struct iscsit_conn *conn = cmd->conn; + + if (!cmd->immediate_data && !cmd->unsolicited_data) + return conn->conn_transport->iscsit_get_dataout(conn, cmd, false); + + return 0; +} + +static int lio_queue_status(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + struct iscsit_conn *conn = cmd->conn; + + cmd->i_state = ISTATE_SEND_STATUS; + + if (cmd->se_cmd.scsi_status || cmd->sense_reason) { + return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + } + return conn->conn_transport->iscsit_queue_status(conn, cmd); +} + +static void lio_queue_tm_rsp(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + + cmd->i_state = ISTATE_SEND_TASKMGTRSP; + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); +} + +static void lio_aborted_task(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + + cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); +} + +static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn; +} + +static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpgt; +} + +static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth; +} + +static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls; +} + +static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls; +} + +static int lio_tpg_check_demo_mode_write_protect( + struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect; +} + +static int lio_tpg_check_prod_mode_write_protect( + struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect; +} + +static int lio_tpg_check_prot_fabric_only( + struct se_portal_group *se_tpg) +{ + /* + * Only report fabric_prot_type if t10_pi has also been enabled + * for incoming ib_isert sessions. + */ + if (!to_iscsi_tpg(se_tpg)->tpg_attrib.t10_pi) + return 0; + return to_iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type; +} + +/* + * This function calls iscsit_inc_session_usage_count() on the + * struct iscsit_session in question. 
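
The reference is held across the teardown so the session cannot be freed out from under the stop path, and the flag checks under the lock ensure only one caller wins the right to tear the session down. A greatly simplified sketch of that claim-then-pin pattern, using C11 atomics in place of the driver's spinlocks (hypothetical types, not the driver's locking):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct sketch_sess {
		atomic_int usage;	/* outstanding references to the session */
		atomic_bool closing;	/* has teardown been claimed already? */
	};

	/* Returns false if another path already claimed the teardown. */
	bool sketch_close_session(struct sketch_sess *s)
	{
		bool expected = false;

		/* Only one caller may win the right to tear the session down. */
		if (!atomic_compare_exchange_strong(&s->closing, &expected, true))
			return false;

		atomic_fetch_add(&s->usage, 1);	/* pin: keep the session alive */
		/* ... stop timers and connections, wait for outstanding I/O ... */
		atomic_fetch_sub(&s->usage, 1);	/* unpin once teardown has run */
		return true;
	}
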
+ */ +static void lio_tpg_close_session(struct se_session *se_sess) +{ + struct iscsit_session *sess = se_sess->fabric_sess_ptr; + struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg; + + spin_lock_bh(&se_tpg->session_lock); + spin_lock(&sess->conn_lock); + if (atomic_read(&sess->session_fall_back_to_erl0) || + atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || + (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess->conn_lock); + spin_unlock_bh(&se_tpg->session_lock); + return; + } + iscsit_inc_session_usage_count(sess); + atomic_set(&sess->session_reinstatement, 1); + atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); + spin_unlock(&sess->conn_lock); + + iscsit_stop_time2retain_timer(sess); + spin_unlock_bh(&se_tpg->session_lock); + + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); +} + +static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index; +} + +static void lio_set_default_node_attributes(struct se_node_acl *se_acl) +{ + struct iscsi_node_acl *acl = to_iscsi_nacl(se_acl); + struct se_portal_group *se_tpg = se_acl->se_tpg; + struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); + + acl->node_attrib.nacl = acl; + iscsit_set_default_node_attribues(acl, tpg); +} + +static int lio_check_stop_free(struct se_cmd *se_cmd) +{ + return target_put_sess_cmd(se_cmd); +} + +static void lio_release_cmd(struct se_cmd *se_cmd) +{ + struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); + + pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd); + iscsit_release_cmd(cmd); +} + +const struct target_core_fabric_ops iscsi_ops = { + .module = THIS_MODULE, + .fabric_alias = "iscsi", + .fabric_name = "iSCSI", + .node_acl_size = sizeof(struct iscsi_node_acl), + .tpg_get_wwn = lio_tpg_get_endpoint_wwn, + .tpg_get_tag = lio_tpg_get_tag, + .tpg_get_default_depth = lio_tpg_get_default_depth, + .tpg_check_demo_mode = lio_tpg_check_demo_mode, + .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = + lio_tpg_check_demo_mode_write_protect, + .tpg_check_prod_mode_write_protect = + lio_tpg_check_prod_mode_write_protect, + .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only, + .tpg_get_inst_index = lio_tpg_get_inst_index, + .check_stop_free = lio_check_stop_free, + .release_cmd = lio_release_cmd, + .close_session = lio_tpg_close_session, + .sess_get_index = lio_sess_get_index, + .sess_get_initiator_sid = lio_sess_get_initiator_sid, + .write_pending = lio_write_pending, + .set_default_node_attributes = lio_set_default_node_attributes, + .get_cmd_state = iscsi_get_cmd_state, + .queue_data_in = lio_queue_data_in, + .queue_status = lio_queue_status, + .queue_tm_rsp = lio_queue_tm_rsp, + .aborted_task = lio_aborted_task, + .fabric_make_wwn = lio_target_call_coreaddtiqn, + .fabric_drop_wwn = lio_target_call_coredeltiqn, + .add_wwn_groups = lio_target_add_wwn_groups, + .fabric_make_tpg = lio_target_tiqn_addtpg, + .fabric_enable_tpg = lio_target_tiqn_enabletpg, + .fabric_drop_tpg = lio_target_tiqn_deltpg, + .fabric_make_np = lio_target_call_addnptotpg, + .fabric_drop_np = lio_target_call_delnpfromtpg, + .fabric_init_nodeacl = lio_target_init_nodeacl, + + .tfc_discovery_attrs = lio_target_discovery_auth_attrs, + .tfc_wwn_attrs = lio_target_wwn_attrs, + .tfc_tpg_base_attrs = lio_target_tpg_attrs, + .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs, + .tfc_tpg_auth_attrs = 
lio_target_tpg_auth_attrs, + .tfc_tpg_param_attrs = lio_target_tpg_param_attrs, + .tfc_tpg_np_base_attrs = lio_target_portal_attrs, + .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs, + .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs, + .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs, + .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs, + + .write_pending_must_be_called = true, +}; diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c new file mode 100644 index 0000000000..2d44781be3 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains the iSCSI Target DataIN value generation functions. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <scsi/iscsi_proto.h> +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_seq_pdu_list.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" +#include "iscsi_target_datain_values.h" + +struct iscsi_datain_req *iscsit_allocate_datain_req(void) +{ + struct iscsi_datain_req *dr; + + dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC); + if (!dr) { + pr_err("Unable to allocate memory for" + " struct iscsi_datain_req\n"); + return NULL; + } + INIT_LIST_HEAD(&dr->cmd_datain_node); + + return dr; +} + +void iscsit_attach_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr) +{ + spin_lock(&cmd->datain_lock); + list_add_tail(&dr->cmd_datain_node, &cmd->datain_list); + spin_unlock(&cmd->datain_lock); +} + +void iscsit_free_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr) +{ + spin_lock(&cmd->datain_lock); + list_del(&dr->cmd_datain_node); + spin_unlock(&cmd->datain_lock); + + kmem_cache_free(lio_dr_cache, dr); +} + +void iscsit_free_all_datain_reqs(struct iscsit_cmd *cmd) +{ + struct iscsi_datain_req *dr, *dr_tmp; + + spin_lock(&cmd->datain_lock); + list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) { + list_del(&dr->cmd_datain_node); + kmem_cache_free(lio_dr_cache, dr); + } + spin_unlock(&cmd->datain_lock); +} + +struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *cmd) +{ + if (list_empty(&cmd->datain_list)) { + pr_err("cmd->datain_list is empty for ITT:" + " 0x%08x\n", cmd->init_task_tag); + return NULL; + } + + return list_first_entry(&cmd->datain_list, struct iscsi_datain_req, + cmd_datain_node); +} + +/* + * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes. + */ +static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes( + struct iscsit_cmd *cmd, + struct iscsi_datain *datain) +{ + u32 next_burst_len, read_data_done, read_data_left; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_datain_req *dr; + + dr = iscsit_get_datain_req(cmd); + if (!dr) + return NULL; + + if (dr->recovery && dr->generate_recovery_values) { + if (iscsit_create_recovery_datain_values_datasequenceinorder_yes( + cmd, dr) < 0) + return NULL; + + dr->generate_recovery_values = 0; + } + + next_burst_len = (!dr->recovery) ? + cmd->next_burst_len : dr->next_burst_len; + read_data_done = (!dr->recovery) ? 
+ cmd->read_data_done : dr->read_data_done; + + read_data_left = (cmd->se_cmd.data_length - read_data_done); + if (!read_data_left) { + pr_err("ITT: 0x%08x read_data_left is zero!\n", + cmd->init_task_tag); + return NULL; + } + + if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) && + (read_data_left <= (conn->sess->sess_ops->MaxBurstLength - + next_burst_len))) { + datain->length = read_data_left; + + datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS); + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + datain->flags |= ISCSI_FLAG_DATA_ACK; + } else { + if ((next_burst_len + + conn->conn_ops->MaxRecvDataSegmentLength) < + conn->sess->sess_ops->MaxBurstLength) { + datain->length = + conn->conn_ops->MaxRecvDataSegmentLength; + next_burst_len += datain->length; + } else { + datain->length = (conn->sess->sess_ops->MaxBurstLength - + next_burst_len); + next_burst_len = 0; + + datain->flags |= ISCSI_FLAG_CMD_FINAL; + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + datain->flags |= ISCSI_FLAG_DATA_ACK; + } + } + + datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; + datain->offset = read_data_done; + + if (!dr->recovery) { + cmd->next_burst_len = next_burst_len; + cmd->read_data_done += datain->length; + } else { + dr->next_burst_len = next_burst_len; + dr->read_data_done += datain->length; + } + + if (!dr->recovery) { + if (datain->flags & ISCSI_FLAG_DATA_STATUS) + dr->dr_complete = DATAIN_COMPLETE_NORMAL; + + return dr; + } + + if (!dr->runlength) { + if (datain->flags & ISCSI_FLAG_DATA_STATUS) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } else { + if ((dr->begrun + dr->runlength) == dr->data_sn) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } + + return dr; +} + +/* + * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes. + */ +static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( + struct iscsit_cmd *cmd, + struct iscsi_datain *datain) +{ + u32 offset, read_data_done, read_data_left, seq_send_order; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_datain_req *dr; + struct iscsi_seq *seq; + + dr = iscsit_get_datain_req(cmd); + if (!dr) + return NULL; + + if (dr->recovery && dr->generate_recovery_values) { + if (iscsit_create_recovery_datain_values_datasequenceinorder_no( + cmd, dr) < 0) + return NULL; + + dr->generate_recovery_values = 0; + } + + read_data_done = (!dr->recovery) ? + cmd->read_data_done : dr->read_data_done; + seq_send_order = (!dr->recovery) ? 
+ cmd->seq_send_order : dr->seq_send_order; + + read_data_left = (cmd->se_cmd.data_length - read_data_done); + if (!read_data_left) { + pr_err("ITT: 0x%08x read_data_left is zero!\n", + cmd->init_task_tag); + return NULL; + } + + seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order); + if (!seq) + return NULL; + + seq->sent = 1; + + if (!dr->recovery && !seq->next_burst_len) + seq->first_datasn = cmd->data_sn; + + offset = (seq->offset + seq->next_burst_len); + + if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= + cmd->se_cmd.data_length) { + datain->length = (cmd->se_cmd.data_length - offset); + datain->offset = offset; + + datain->flags |= ISCSI_FLAG_CMD_FINAL; + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + datain->flags |= ISCSI_FLAG_DATA_ACK; + + seq->next_burst_len = 0; + seq_send_order++; + } else { + if ((seq->next_burst_len + + conn->conn_ops->MaxRecvDataSegmentLength) < + conn->sess->sess_ops->MaxBurstLength) { + datain->length = + conn->conn_ops->MaxRecvDataSegmentLength; + datain->offset = (seq->offset + seq->next_burst_len); + + seq->next_burst_len += datain->length; + } else { + datain->length = (conn->sess->sess_ops->MaxBurstLength - + seq->next_burst_len); + datain->offset = (seq->offset + seq->next_burst_len); + + datain->flags |= ISCSI_FLAG_CMD_FINAL; + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + datain->flags |= ISCSI_FLAG_DATA_ACK; + + seq->next_burst_len = 0; + seq_send_order++; + } + } + + if ((read_data_done + datain->length) == cmd->se_cmd.data_length) + datain->flags |= ISCSI_FLAG_DATA_STATUS; + + datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; + if (!dr->recovery) { + cmd->seq_send_order = seq_send_order; + cmd->read_data_done += datain->length; + } else { + dr->seq_send_order = seq_send_order; + dr->read_data_done += datain->length; + } + + if (!dr->recovery) { + if (datain->flags & ISCSI_FLAG_CMD_FINAL) + seq->last_datasn = datain->data_sn; + if (datain->flags & ISCSI_FLAG_DATA_STATUS) + dr->dr_complete = DATAIN_COMPLETE_NORMAL; + + return dr; + } + + if (!dr->runlength) { + if (datain->flags & ISCSI_FLAG_DATA_STATUS) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } else { + if ((dr->begrun + dr->runlength) == dr->data_sn) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } + + return dr; +} + +/* + * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No. + */ +static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no( + struct iscsit_cmd *cmd, + struct iscsi_datain *datain) +{ + u32 next_burst_len, read_data_done, read_data_left; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_datain_req *dr; + struct iscsi_pdu *pdu; + + dr = iscsit_get_datain_req(cmd); + if (!dr) + return NULL; + + if (dr->recovery && dr->generate_recovery_values) { + if (iscsit_create_recovery_datain_values_datasequenceinorder_yes( + cmd, dr) < 0) + return NULL; + + dr->generate_recovery_values = 0; + } + + next_burst_len = (!dr->recovery) ? + cmd->next_burst_len : dr->next_burst_len; + read_data_done = (!dr->recovery) ? 
+ cmd->read_data_done : dr->read_data_done; + + read_data_left = (cmd->se_cmd.data_length - read_data_done); + if (!read_data_left) { + pr_err("ITT: 0x%08x read_data_left is zero!\n", + cmd->init_task_tag); + return dr; + } + + pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL); + if (!pdu) + return dr; + + if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) { + pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS); + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + pdu->flags |= ISCSI_FLAG_DATA_ACK; + + next_burst_len = 0; + } else { + if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) < + conn->sess->sess_ops->MaxBurstLength) + next_burst_len += pdu->length; + else { + pdu->flags |= ISCSI_FLAG_CMD_FINAL; + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + pdu->flags |= ISCSI_FLAG_DATA_ACK; + + next_burst_len = 0; + } + } + + pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; + if (!dr->recovery) { + cmd->next_burst_len = next_burst_len; + cmd->read_data_done += pdu->length; + } else { + dr->next_burst_len = next_burst_len; + dr->read_data_done += pdu->length; + } + + datain->flags = pdu->flags; + datain->length = pdu->length; + datain->offset = pdu->offset; + datain->data_sn = pdu->data_sn; + + if (!dr->recovery) { + if (datain->flags & ISCSI_FLAG_DATA_STATUS) + dr->dr_complete = DATAIN_COMPLETE_NORMAL; + + return dr; + } + + if (!dr->runlength) { + if (datain->flags & ISCSI_FLAG_DATA_STATUS) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } else { + if ((dr->begrun + dr->runlength) == dr->data_sn) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } + + return dr; +} + +/* + * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No. + */ +static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no( + struct iscsit_cmd *cmd, + struct iscsi_datain *datain) +{ + u32 read_data_done, read_data_left, seq_send_order; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_datain_req *dr; + struct iscsi_pdu *pdu; + struct iscsi_seq *seq = NULL; + + dr = iscsit_get_datain_req(cmd); + if (!dr) + return NULL; + + if (dr->recovery && dr->generate_recovery_values) { + if (iscsit_create_recovery_datain_values_datasequenceinorder_no( + cmd, dr) < 0) + return NULL; + + dr->generate_recovery_values = 0; + } + + read_data_done = (!dr->recovery) ? + cmd->read_data_done : dr->read_data_done; + seq_send_order = (!dr->recovery) ? 
+ cmd->seq_send_order : dr->seq_send_order; + + read_data_left = (cmd->se_cmd.data_length - read_data_done); + if (!read_data_left) { + pr_err("ITT: 0x%08x read_data_left is zero!\n", + cmd->init_task_tag); + return NULL; + } + + seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order); + if (!seq) + return NULL; + + seq->sent = 1; + + if (!dr->recovery && !seq->next_burst_len) + seq->first_datasn = cmd->data_sn; + + pdu = iscsit_get_pdu_holder_for_seq(cmd, seq); + if (!pdu) + return NULL; + + if (seq->pdu_send_order == seq->pdu_count) { + pdu->flags |= ISCSI_FLAG_CMD_FINAL; + if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) + pdu->flags |= ISCSI_FLAG_DATA_ACK; + + seq->next_burst_len = 0; + seq_send_order++; + } else + seq->next_burst_len += pdu->length; + + if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) + pdu->flags |= ISCSI_FLAG_DATA_STATUS; + + pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; + if (!dr->recovery) { + cmd->seq_send_order = seq_send_order; + cmd->read_data_done += pdu->length; + } else { + dr->seq_send_order = seq_send_order; + dr->read_data_done += pdu->length; + } + + datain->flags = pdu->flags; + datain->length = pdu->length; + datain->offset = pdu->offset; + datain->data_sn = pdu->data_sn; + + if (!dr->recovery) { + if (datain->flags & ISCSI_FLAG_CMD_FINAL) + seq->last_datasn = datain->data_sn; + if (datain->flags & ISCSI_FLAG_DATA_STATUS) + dr->dr_complete = DATAIN_COMPLETE_NORMAL; + + return dr; + } + + if (!dr->runlength) { + if (datain->flags & ISCSI_FLAG_DATA_STATUS) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } else { + if ((dr->begrun + dr->runlength) == dr->data_sn) { + dr->dr_complete = + (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ? 
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY : + DATAIN_COMPLETE_CONNECTION_RECOVERY; + } + } + + return dr; +} + +struct iscsi_datain_req *iscsit_get_datain_values( + struct iscsit_cmd *cmd, + struct iscsi_datain *datain) +{ + struct iscsit_conn *conn = cmd->conn; + + if (conn->sess->sess_ops->DataSequenceInOrder && + conn->sess->sess_ops->DataPDUInOrder) + return iscsit_set_datain_values_yes_and_yes(cmd, datain); + else if (!conn->sess->sess_ops->DataSequenceInOrder && + conn->sess->sess_ops->DataPDUInOrder) + return iscsit_set_datain_values_no_and_yes(cmd, datain); + else if (conn->sess->sess_ops->DataSequenceInOrder && + !conn->sess->sess_ops->DataPDUInOrder) + return iscsit_set_datain_values_yes_and_no(cmd, datain); + else if (!conn->sess->sess_ops->DataSequenceInOrder && + !conn->sess->sess_ops->DataPDUInOrder) + return iscsit_set_datain_values_no_and_no(cmd, datain); + + return NULL; +} +EXPORT_SYMBOL(iscsit_get_datain_values); diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h new file mode 100644 index 0000000000..b28df886d8 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_datain_values.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_DATAIN_VALUES_H +#define ISCSI_TARGET_DATAIN_VALUES_H + +struct iscsit_cmd; +struct iscsi_datain; + +extern struct iscsi_datain_req *iscsit_allocate_datain_req(void); +extern void iscsit_attach_datain_req(struct iscsit_cmd *, struct iscsi_datain_req *); +extern void iscsit_free_datain_req(struct iscsit_cmd *, struct iscsi_datain_req *); +extern void iscsit_free_all_datain_reqs(struct iscsit_cmd *); +extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *); +extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsit_cmd *, + struct iscsi_datain *); + +#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c new file mode 100644 index 0000000000..b565ce3b26 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains the iSCSI Virtual Device and Disk Transport + * agnostic related functions. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_device.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" + +void iscsit_determine_maxcmdsn(struct iscsit_session *sess) +{ + struct se_node_acl *se_nacl; + + /* + * This is a discovery session, the single queue slot was already + * assigned in iscsi_login_zero_tsih(). Since only Logout and + * Text Opcodes are allowed during discovery we do not have to worry + * about the HBA's queue depth here. + */ + if (sess->sess_ops->SessionType) + return; + + se_nacl = sess->se_sess->se_node_acl; + + /* + * This is a normal session, set the Session's CmdSN window to the + * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth + * has already been validated as a legal value in + * core_set_queue_depth_for_node(). 
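+ *
+ * Worked arithmetic for illustration (queue depth assumed): with
+ * se_nacl->queue_depth = 64 and MaxCmdSN still sitting at ExpCmdSN
+ * from the single login slot, the atomic_add() below yields
+ *
+ *   MaxCmdSN = ExpCmdSN + 64 - 1
+ *
+ * i.e. an inclusive CmdSN window of MaxCmdSN - ExpCmdSN + 1 = 64
+ * outstanding commands, matching the RFC 3720 window definition.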
+ */ + sess->cmdsn_window = se_nacl->queue_depth; + atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn); +} + +void iscsit_increment_maxcmdsn(struct iscsit_cmd *cmd, struct iscsit_session *sess) +{ + u32 max_cmd_sn; + + if (cmd->immediate_cmd || cmd->maxcmdsn_inc) + return; + + cmd->maxcmdsn_inc = 1; + + max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn); + pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn); +} +EXPORT_SYMBOL(iscsit_increment_maxcmdsn); diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h new file mode 100644 index 0000000000..3663401205 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_device.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_DEVICE_H +#define ISCSI_TARGET_DEVICE_H + +struct iscsit_cmd; +struct iscsit_session; + +extern void iscsit_determine_maxcmdsn(struct iscsit_session *); +extern void iscsit_increment_maxcmdsn(struct iscsit_cmd *, struct iscsit_session *); + +#endif /* ISCSI_TARGET_DEVICE_H */ diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c new file mode 100644 index 0000000000..07e9cf431e --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -0,0 +1,936 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/****************************************************************************** + * This file contains error recovery level zero functions used by + * the iSCSI Target driver. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/sched/signal.h> + +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_seq_pdu_list.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_erl2.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" + +/* + * Used to set values in struct iscsit_cmd that iscsit_dataout_check_sequence() + * checks against to determine a PDU's Offset+Length is within the current + * DataOUT Sequence. Used for DataSequenceInOrder=Yes only. + */ +void iscsit_set_dataout_sequence_values( + struct iscsit_cmd *cmd) +{ + struct iscsit_conn *conn = cmd->conn; + /* + * Still set seq_start_offset and seq_end_offset for Unsolicited + * DataOUT, even if DataSequenceInOrder=No. + */ + if (cmd->unsolicited_data) { + cmd->seq_start_offset = cmd->write_data_done; + cmd->seq_end_offset = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); + return; + } + + if (!conn->sess->sess_ops->DataSequenceInOrder) + return; + + if (!cmd->seq_start_offset && !cmd->seq_end_offset) { + cmd->seq_start_offset = cmd->write_data_done; + cmd->seq_end_offset = (cmd->se_cmd.data_length > + conn->sess->sess_ops->MaxBurstLength) ? + (cmd->write_data_done + + conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length; + } else { + cmd->seq_start_offset = cmd->seq_end_offset; + cmd->seq_end_offset = ((cmd->seq_end_offset + + conn->sess->sess_ops->MaxBurstLength) >= + cmd->se_cmd.data_length) ? 
cmd->se_cmd.data_length : + (cmd->seq_end_offset + + conn->sess->sess_ops->MaxBurstLength); + } +} + +static int iscsit_dataout_within_command_recovery_check( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsit_conn *conn = cmd->conn; + struct iscsi_data *hdr = (struct iscsi_data *) buf; + u32 payload_length = ntoh24(hdr->dlength); + + /* + * We do the within-command recovery checks here as it is + * the first function called in iscsi_check_pre_dataout(). + * Basically, if we are in within-command recovery and + * the PDU does not contain the offset the sequence needs, + * dump the payload. + * + * This only applies to DataPDUInOrder=Yes, for + * DataPDUInOrder=No we only re-request the failed PDU + * and check that all PDUs in a sequence are received + * upon end of sequence. + */ + if (conn->sess->sess_ops->DataSequenceInOrder) { + if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) && + cmd->write_data_done != be32_to_cpu(hdr->offset)) + goto dump; + + cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY; + } else { + struct iscsi_seq *seq; + + seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset), + payload_length); + if (!seq) + return DATAOUT_CANNOT_RECOVER; + /* + * Set the struct iscsi_seq pointer to reuse later. + */ + cmd->seq_ptr = seq; + + if (conn->sess->sess_ops->DataPDUInOrder) { + if (seq->status == + DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY && + (seq->offset != be32_to_cpu(hdr->offset) || + seq->data_sn != be32_to_cpu(hdr->datasn))) + goto dump; + } else { + if (seq->status == + DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY && + seq->data_sn != be32_to_cpu(hdr->datasn)) + goto dump; + } + + if (seq->status == DATAOUT_SEQUENCE_COMPLETE) + goto dump; + + if (seq->status != DATAOUT_SEQUENCE_COMPLETE) + seq->status = 0; + } + + return DATAOUT_NORMAL; + +dump: + pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:" + " 0x%08x\n", hdr->offset, payload_length, hdr->datasn); + return iscsit_dump_data_payload(conn, payload_length, 1); +} + +static int iscsit_dataout_check_unsolicited_sequence( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + u32 first_burst_len; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_data *hdr = (struct iscsi_data *) buf; + u32 payload_length = ntoh24(hdr->dlength); + + + if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) || + ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) { + pr_err("Command ITT: 0x%08x with Offset: %u," + " Length: %u outside of Unsolicited Sequence %u:%u while" + " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, + be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset, + cmd->seq_end_offset); + return DATAOUT_CANNOT_RECOVER; + } + + first_burst_len = (cmd->first_burst_len + payload_length); + + if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) { + pr_err("Total %u bytes exceeds FirstBurstLength: %u" + " for this Unsolicited DataOut Burst.\n", + first_burst_len, conn->sess->sess_ops->FirstBurstLength); + transport_send_check_condition_and_sense(&cmd->se_cmd, + TCM_INCORRECT_AMOUNT_OF_DATA, 0); + return DATAOUT_CANNOT_RECOVER; + } + + /* + * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity + * checks for the current Unsolicited DataOUT Sequence. + */ + if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { + /* + * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of + * sequence checks are handled in + * iscsit_dataout_datapduinorder_no_fbit(). 
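+ *
+ * Worked example (values assumed): with FirstBurstLength = 65536
+ * and ExpXferLen = 49152, a final (F-bit) unsolicited PDU must
+ * leave first_burst_len at exactly 49152 (== ExpXferLen); with
+ * ExpXferLen = 131072 it must stop at exactly 65536
+ * (== FirstBurstLength). Any other total fails the checks below.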
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if ((first_burst_len != cmd->se_cmd.data_length) &&
+ (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data"
+ " received %u does not equal FirstBurstLength: %u, and"
+ " does not equal ExpXferLen %u.\n", first_burst_len,
+ conn->sess->sess_ops->FirstBurstLength,
+ cmd->se_cmd.data_length);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag,
+ conn->sess->sess_ops->FirstBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if (first_burst_len == cmd->se_cmd.data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_sequence(
+ struct iscsit_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 next_burst_len;
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: Check that the offset and offset+length
+ * are within range as defined by iscsit_set_dataout_sequence_values().
+ *
+ * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for
+ * the offset+length tuple.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ /*
+ * Due to the possibility of recovery DataOUT sent by the
+ * initiator fulfilling a Recovery R2T, it's best to just dump
+ * the payload here, instead of erroring out.
+ */
+ if ((be32_to_cpu(hdr->offset) < cmd->seq_start_offset) ||
+ ((be32_to_cpu(hdr->offset) + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ be32_to_cpu(hdr->offset), payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (cmd->next_burst_len + payload_length);
+ } else {
+ seq = iscsit_get_seq_holder(cmd, be32_to_cpu(hdr->offset),
+ payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (seq->next_burst_len + payload_length);
+ }
+
+ if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
+ " Length: %u exceeds MaxBurstLength: %u. protocol"
+ " error.\n", cmd->init_task_tag,
+ (next_burst_len - payload_length),
+ payload_length, conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current DataOUT Sequence.
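+ *
+ * Worked example (values assumed): MaxBurstLength = 262144 on a
+ * 1M write gives sequence windows [0,256K), [256K,512K), etc.
+ * The F-bit must be set on exactly the PDU that completes a
+ * window (or the command's final PDU); setting it early, or
+ * omitting it there, is treated as a protocol error below.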
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No;
+ * end of sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((next_burst_len <
+ conn->sess->sess_ops->MaxBurstLength) &&
+ ((cmd->write_data_done + payload_length) <
+ cmd->se_cmd.data_length)) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len < seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (next_burst_len ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
+ " not set, protocol error.", cmd->init_task_tag,
+ conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if ((cmd->write_data_done + payload_length) ==
+ cmd->se_cmd.data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len == seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_datasn(
+ struct iscsit_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 data_sn = 0;
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * Considering the target has no method of re-requesting DataOUT
+ * by DataSN, if we receive a greater DataSN than expected we
+ * assume the functions for DataPDUInOrder=[Yes,No] below will
+ * handle it.
+ *
+ * If the DataSN is less than expected, dump the payload.
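+ *
+ * Example (DataSN values assumed): expecting DataSN 3, a received
+ * DataSN 5 is dumped and left to within-command recovery when
+ * ERL > 0 (at ERL=0 the connection fails); a stale DataSN 2 is
+ * simply dumped and DATAOUT_WITHIN_COMMAND_RECOVERY returned.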
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ data_sn = cmd->data_sn;
+ else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+ data_sn = seq->data_sn;
+ }
+
+ if (be32_to_cpu(hdr->datasn) > data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " higher than expected 0x%08x.\n", cmd->init_task_tag,
+ be32_to_cpu(hdr->datasn), data_sn);
+ goto recover;
+ } else if (be32_to_cpu(hdr->datasn) < data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " lower than expected 0x%08x, discarding payload.\n",
+ cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
+ goto dump;
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static int iscsit_dataout_pre_datapduinorder_yes(
+ struct iscsit_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: If the offset is greater than the global
+ * DataPDUInOrder=Yes offset counter in struct iscsit_cmd, a protocol
+ * error has occurred; fail the connection.
+ *
+ * For DataSequenceInOrder=No: If the offset is greater than the per
+ * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a
+ * protocol error has occurred; fail the connection.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (be32_to_cpu(hdr->offset) != cmd->write_data_done) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u different than expected %u.\n", cmd->init_task_tag,
+ be32_to_cpu(hdr->offset), cmd->write_data_done);
+ recovery = 1;
+ goto recover;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ if (be32_to_cpu(hdr->offset) > seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u greater than expected %u.\n", cmd->init_task_tag,
+ be32_to_cpu(hdr->offset), seq->offset);
+ recovery = 1;
+ goto recover;
+ } else if (be32_to_cpu(hdr->offset) < seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u less than expected %u, discarding payload.\n",
+ cmd->init_task_tag, be32_to_cpu(hdr->offset),
+ seq->offset);
+ dump = 1;
+ goto dump;
+ }
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery) ? iscsit_recover_dataout_sequence(cmd,
+ be32_to_cpu(hdr->offset), payload_length) :
+ (dump) ?
DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL; +} + +static int iscsit_dataout_pre_datapduinorder_no( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsi_pdu *pdu; + struct iscsi_data *hdr = (struct iscsi_data *) buf; + u32 payload_length = ntoh24(hdr->dlength); + + pdu = iscsit_get_pdu_holder(cmd, be32_to_cpu(hdr->offset), + payload_length); + if (!pdu) + return DATAOUT_CANNOT_RECOVER; + + cmd->pdu_ptr = pdu; + + switch (pdu->status) { + case ISCSI_PDU_NOT_RECEIVED: + case ISCSI_PDU_CRC_FAILED: + case ISCSI_PDU_TIMED_OUT: + break; + case ISCSI_PDU_RECEIVED_OK: + pr_err("Command ITT: 0x%08x received already gotten" + " Offset: %u, Length: %u\n", cmd->init_task_tag, + be32_to_cpu(hdr->offset), payload_length); + return iscsit_dump_data_payload(cmd->conn, payload_length, 1); + default: + return DATAOUT_CANNOT_RECOVER; + } + + return DATAOUT_NORMAL; +} + +static int iscsit_dataout_update_r2t(struct iscsit_cmd *cmd, u32 offset, u32 length) +{ + struct iscsi_r2t *r2t; + + if (cmd->unsolicited_data) + return 0; + + r2t = iscsit_get_r2t_for_eos(cmd, offset, length); + if (!r2t) + return -1; + + spin_lock_bh(&cmd->r2t_lock); + r2t->seq_complete = 1; + cmd->outstanding_r2ts--; + spin_unlock_bh(&cmd->r2t_lock); + + return 0; +} + +static int iscsit_dataout_update_datapduinorder_no( + struct iscsit_cmd *cmd, + u32 data_sn, + int f_bit) +{ + int ret = 0; + struct iscsi_pdu *pdu = cmd->pdu_ptr; + + pdu->data_sn = data_sn; + + switch (pdu->status) { + case ISCSI_PDU_NOT_RECEIVED: + pdu->status = ISCSI_PDU_RECEIVED_OK; + break; + case ISCSI_PDU_CRC_FAILED: + pdu->status = ISCSI_PDU_RECEIVED_OK; + break; + case ISCSI_PDU_TIMED_OUT: + pdu->status = ISCSI_PDU_RECEIVED_OK; + break; + default: + return DATAOUT_CANNOT_RECOVER; + } + + if (f_bit) { + ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu); + if (ret == DATAOUT_CANNOT_RECOVER) + return ret; + } + + return DATAOUT_NORMAL; +} + +static int iscsit_dataout_post_crc_passed( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + int ret, send_r2t = 0; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_seq *seq = NULL; + struct iscsi_data *hdr = (struct iscsi_data *) buf; + u32 payload_length = ntoh24(hdr->dlength); + + if (cmd->unsolicited_data) { + if ((cmd->first_burst_len + payload_length) == + conn->sess->sess_ops->FirstBurstLength) { + if (iscsit_dataout_update_r2t(cmd, be32_to_cpu(hdr->offset), + payload_length) < 0) + return DATAOUT_CANNOT_RECOVER; + send_r2t = 1; + } + + if (!conn->sess->sess_ops->DataPDUInOrder) { + ret = iscsit_dataout_update_datapduinorder_no(cmd, + be32_to_cpu(hdr->datasn), + (hdr->flags & ISCSI_FLAG_CMD_FINAL)); + if (ret == DATAOUT_CANNOT_RECOVER) + return ret; + } + + cmd->first_burst_len += payload_length; + + if (conn->sess->sess_ops->DataSequenceInOrder) + cmd->data_sn++; + else { + seq = cmd->seq_ptr; + seq->data_sn++; + seq->offset += payload_length; + } + + if (send_r2t) { + if (seq) + seq->status = DATAOUT_SEQUENCE_COMPLETE; + cmd->first_burst_len = 0; + cmd->unsolicited_data = 0; + } + } else { + if (conn->sess->sess_ops->DataSequenceInOrder) { + if ((cmd->next_burst_len + payload_length) == + conn->sess->sess_ops->MaxBurstLength) { + if (iscsit_dataout_update_r2t(cmd, + be32_to_cpu(hdr->offset), + payload_length) < 0) + return DATAOUT_CANNOT_RECOVER; + send_r2t = 1; + } + + if (!conn->sess->sess_ops->DataPDUInOrder) { + ret = iscsit_dataout_update_datapduinorder_no( + cmd, be32_to_cpu(hdr->datasn), + (hdr->flags & ISCSI_FLAG_CMD_FINAL)); + if (ret == DATAOUT_CANNOT_RECOVER) + return ret; + } + 
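+ /*
+ * Burst accounting sketch (values assumed): with MaxBurstLength =
+ * 65536 and MaxRecvDataSegmentLength = 8192, eight 8K PDUs fill
+ * one burst; the eighth sets send_r2t above, next_burst_len is
+ * reset to zero below, and cmd->data_sn restarts at 0 for the
+ * next R2T's sequence.
+ */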
+ cmd->next_burst_len += payload_length; + cmd->data_sn++; + + if (send_r2t) + cmd->next_burst_len = 0; + } else { + seq = cmd->seq_ptr; + + if ((seq->next_burst_len + payload_length) == + seq->xfer_len) { + if (iscsit_dataout_update_r2t(cmd, + be32_to_cpu(hdr->offset), + payload_length) < 0) + return DATAOUT_CANNOT_RECOVER; + send_r2t = 1; + } + + if (!conn->sess->sess_ops->DataPDUInOrder) { + ret = iscsit_dataout_update_datapduinorder_no( + cmd, be32_to_cpu(hdr->datasn), + (hdr->flags & ISCSI_FLAG_CMD_FINAL)); + if (ret == DATAOUT_CANNOT_RECOVER) + return ret; + } + + seq->data_sn++; + seq->offset += payload_length; + seq->next_burst_len += payload_length; + + if (send_r2t) { + seq->next_burst_len = 0; + seq->status = DATAOUT_SEQUENCE_COMPLETE; + } + } + } + + if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder) + cmd->data_sn = 0; + + cmd->write_data_done += payload_length; + + if (cmd->write_data_done == cmd->se_cmd.data_length) + return DATAOUT_SEND_TO_TRANSPORT; + else if (send_r2t) + return DATAOUT_SEND_R2T; + else + return DATAOUT_NORMAL; +} + +static int iscsit_dataout_post_crc_failed( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsit_conn *conn = cmd->conn; + struct iscsi_pdu *pdu; + struct iscsi_data *hdr = (struct iscsi_data *) buf; + u32 payload_length = ntoh24(hdr->dlength); + + if (conn->sess->sess_ops->DataPDUInOrder) + goto recover; + /* + * The rest of this function is only called when DataPDUInOrder=No. + */ + pdu = cmd->pdu_ptr; + + switch (pdu->status) { + case ISCSI_PDU_NOT_RECEIVED: + pdu->status = ISCSI_PDU_CRC_FAILED; + break; + case ISCSI_PDU_CRC_FAILED: + break; + case ISCSI_PDU_TIMED_OUT: + pdu->status = ISCSI_PDU_CRC_FAILED; + break; + default: + return DATAOUT_CANNOT_RECOVER; + } + +recover: + return iscsit_recover_dataout_sequence(cmd, be32_to_cpu(hdr->offset), + payload_length); +} + +/* + * Called from iscsit_handle_data_out() before DataOUT Payload is received + * and CRC computed. + */ +int iscsit_check_pre_dataout( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + int ret; + struct iscsit_conn *conn = cmd->conn; + + ret = iscsit_dataout_within_command_recovery_check(cmd, buf); + if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || + (ret == DATAOUT_CANNOT_RECOVER)) + return ret; + + ret = iscsit_dataout_check_datasn(cmd, buf); + if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || + (ret == DATAOUT_CANNOT_RECOVER)) + return ret; + + if (cmd->unsolicited_data) { + ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf); + if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || + (ret == DATAOUT_CANNOT_RECOVER)) + return ret; + } else { + ret = iscsit_dataout_check_sequence(cmd, buf); + if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || + (ret == DATAOUT_CANNOT_RECOVER)) + return ret; + } + + return (conn->sess->sess_ops->DataPDUInOrder) ? + iscsit_dataout_pre_datapduinorder_yes(cmd, buf) : + iscsit_dataout_pre_datapduinorder_no(cmd, buf); +} + +/* + * Called from iscsit_handle_data_out() after DataOUT Payload is received + * and CRC computed. 
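+ *
+ * Returns DATAOUT_NORMAL, DATAOUT_SEND_R2T or DATAOUT_SEND_TO_TRANSPORT
+ * on success paths, DATAOUT_WITHIN_COMMAND_RECOVERY when the sequence
+ * must be retried, and DATAOUT_CANNOT_RECOVER when recovery is not
+ * possible (e.g. a data digest failure while ERL=0).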
+ */ +int iscsit_check_post_dataout( + struct iscsit_cmd *cmd, + unsigned char *buf, + u8 data_crc_failed) +{ + struct iscsit_conn *conn = cmd->conn; + + cmd->dataout_timeout_retries = 0; + + if (!data_crc_failed) + return iscsit_dataout_post_crc_passed(cmd, buf); + else { + if (!conn->sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from DataOUT CRC" + " failure while ERL=0, closing session.\n"); + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, + buf); + return DATAOUT_CANNOT_RECOVER; + } + + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf); + return iscsit_dataout_post_crc_failed(cmd, buf); + } +} + +void iscsit_handle_time2retain_timeout(struct timer_list *t) +{ + struct iscsit_session *sess = from_timer(sess, t, time2retain_timer); + struct iscsi_portal_group *tpg = sess->tpg; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + + spin_lock_bh(&se_tpg->session_lock); + if (sess->time2retain_timer_flags & ISCSI_TF_STOP) { + spin_unlock_bh(&se_tpg->session_lock); + return; + } + if (atomic_read(&sess->session_reinstatement)) { + pr_err("Exiting Time2Retain handler because" + " session_reinstatement=1\n"); + spin_unlock_bh(&se_tpg->session_lock); + return; + } + sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED; + + pr_err("Time2Retain timer expired for SID: %u, cleaning up" + " iSCSI session.\n", sess->sid); + + iscsit_fill_cxn_timeout_err_stats(sess); + spin_unlock_bh(&se_tpg->session_lock); + iscsit_close_session(sess, false); +} + +void iscsit_start_time2retain_handler(struct iscsit_session *sess) +{ + int tpg_active; + /* + * Only start Time2Retain timer when the associated TPG is still in + * an ACTIVE (eg: not disabled or shutdown) state. + */ + spin_lock(&sess->tpg->tpg_state_lock); + tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&sess->tpg->tpg_state_lock); + + if (!tpg_active) + return; + + if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING) + return; + + pr_debug("Starting Time2Retain timer for %u seconds on" + " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid); + + sess->time2retain_timer_flags &= ~ISCSI_TF_STOP; + sess->time2retain_timer_flags |= ISCSI_TF_RUNNING; + mod_timer(&sess->time2retain_timer, + jiffies + sess->sess_ops->DefaultTime2Retain * HZ); +} + +int iscsit_stop_time2retain_timer(struct iscsit_session *sess) +{ + struct iscsi_portal_group *tpg = sess->tpg; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + + lockdep_assert_held(&se_tpg->session_lock); + + if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) + return -1; + + if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING)) + return 0; + + sess->time2retain_timer_flags |= ISCSI_TF_STOP; + spin_unlock(&se_tpg->session_lock); + + del_timer_sync(&sess->time2retain_timer); + + spin_lock(&se_tpg->session_lock); + sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; + pr_debug("Stopped Time2Retain Timer for SID: %u\n", + sess->sid); + return 0; +} + +void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->state_lock); + if (atomic_read(&conn->connection_exit)) { + spin_unlock_bh(&conn->state_lock); + goto sleep; + } + + if (atomic_read(&conn->transport_failed)) { + spin_unlock_bh(&conn->state_lock); + goto sleep; + } + spin_unlock_bh(&conn->state_lock); + + if (conn->tx_thread && conn->tx_thread_active) + send_sig(SIGINT, conn->tx_thread, 1); + if (conn->rx_thread && conn->rx_thread_active) + send_sig(SIGINT, conn->rx_thread, 1); + +sleep: + wait_for_completion(&conn->conn_wait_rcfr_comp); + 
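+ /*
+ * The wait above blocks until the teardown path signals that
+ * connection reinstatement has finished; the completion below then
+ * releases that thread to free the connection.
+ */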
complete(&conn->conn_post_wait_comp); +} + +void iscsit_cause_connection_reinstatement(struct iscsit_conn *conn, int sleep) +{ + spin_lock_bh(&conn->state_lock); + if (atomic_read(&conn->connection_exit)) { + spin_unlock_bh(&conn->state_lock); + return; + } + + if (atomic_read(&conn->transport_failed)) { + spin_unlock_bh(&conn->state_lock); + return; + } + + if (atomic_read(&conn->connection_reinstatement)) { + spin_unlock_bh(&conn->state_lock); + return; + } + + if (conn->tx_thread && conn->tx_thread_active) + send_sig(SIGINT, conn->tx_thread, 1); + if (conn->rx_thread && conn->rx_thread_active) + send_sig(SIGINT, conn->rx_thread, 1); + + atomic_set(&conn->connection_reinstatement, 1); + if (!sleep) { + spin_unlock_bh(&conn->state_lock); + return; + } + + atomic_set(&conn->sleep_on_conn_wait_comp, 1); + spin_unlock_bh(&conn->state_lock); + + wait_for_completion(&conn->conn_wait_comp); + complete(&conn->conn_post_wait_comp); +} +EXPORT_SYMBOL(iscsit_cause_connection_reinstatement); + +void iscsit_fall_back_to_erl0(struct iscsit_session *sess) +{ + pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:" + " %u\n", sess->sid); + + atomic_set(&sess->session_fall_back_to_erl0, 1); +} + +static void iscsit_handle_connection_cleanup(struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + + if ((sess->sess_ops->ErrorRecoveryLevel == 2) && + !atomic_read(&sess->session_reinstatement) && + !atomic_read(&sess->session_fall_back_to_erl0)) + iscsit_connection_recovery_transport_reset(conn); + else { + pr_debug("Performing cleanup for failed iSCSI" + " Connection ID: %hu from %s\n", conn->cid, + sess->sess_ops->InitiatorName); + iscsit_close_connection(conn); + } +} + +void iscsit_take_action_for_connection_exit(struct iscsit_conn *conn, bool *conn_freed) +{ + *conn_freed = false; + + spin_lock_bh(&conn->state_lock); + if (atomic_read(&conn->connection_exit)) { + spin_unlock_bh(&conn->state_lock); + return; + } + atomic_set(&conn->connection_exit, 1); + + if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { + spin_unlock_bh(&conn->state_lock); + iscsit_close_connection(conn); + *conn_freed = true; + return; + } + + if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) { + spin_unlock_bh(&conn->state_lock); + return; + } + + pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); + conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; + spin_unlock_bh(&conn->state_lock); + + iscsit_handle_connection_cleanup(conn); + *conn_freed = true; +} diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h new file mode 100644 index 0000000000..2a877d1397 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_erl0.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_ERL0_H +#define ISCSI_TARGET_ERL0_H + +#include <linux/types.h> + +struct iscsit_cmd; +struct iscsit_conn; +struct iscsit_session; + +extern void iscsit_set_dataout_sequence_values(struct iscsit_cmd *); +extern int iscsit_check_pre_dataout(struct iscsit_cmd *, unsigned char *); +extern int iscsit_check_post_dataout(struct iscsit_cmd *, unsigned char *, u8); +extern void iscsit_start_time2retain_handler(struct iscsit_session *); +extern void iscsit_handle_time2retain_timeout(struct timer_list *t); +extern int iscsit_stop_time2retain_timer(struct iscsit_session *); +extern void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *); +extern void iscsit_cause_connection_reinstatement(struct iscsit_conn *, int); +extern void iscsit_fall_back_to_erl0(struct iscsit_session 
*); +extern void iscsit_take_action_for_connection_exit(struct iscsit_conn *, bool *); + +#endif /*** ISCSI_TARGET_ERL0_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c new file mode 100644 index 0000000000..f460a66c0e --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -0,0 +1,1239 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains error recovery level one used by the iSCSI Target driver. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/list.h> +#include <linux/slab.h> +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_seq_pdu_list.h" +#include "iscsi_target_datain_values.h" +#include "iscsi_target_device.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_erl2.h" +#include "iscsi_target.h" + +#define OFFLOAD_BUF_SIZE 32768U + +/* + * Used to dump excess datain payload for certain error recovery + * situations. Receive in OFFLOAD_BUF_SIZE max of datain per rx_data(). + * + * dump_padding_digest denotes if padding and data digests need + * to be dumped. + */ +int iscsit_dump_data_payload( + struct iscsit_conn *conn, + u32 buf_len, + int dump_padding_digest) +{ + char *buf; + int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got; + u32 length, offset = 0, size; + struct kvec iov; + + if (conn->sess->sess_ops->RDMAExtensions) + return 0; + + if (dump_padding_digest) { + buf_len = ALIGN(buf_len, 4); + if (conn->conn_ops->DataDigest) + buf_len += ISCSI_CRC_LEN; + } + + length = min(buf_len, OFFLOAD_BUF_SIZE); + + buf = kzalloc(length, GFP_ATOMIC); + if (!buf) { + pr_err("Unable to allocate %u bytes for offload" + " buffer.\n", length); + return -1; + } + memset(&iov, 0, sizeof(struct kvec)); + + while (offset < buf_len) { + size = min(buf_len - offset, length); + + iov.iov_len = size; + iov.iov_base = buf; + + rx_got = rx_data(conn, &iov, 1, size); + if (rx_got != size) { + ret = DATAOUT_CANNOT_RECOVER; + break; + } + + offset += size; + } + + kfree(buf); + return ret; +} + +/* + * Used for retransmitting R2Ts from a R2T SNACK request. + */ +static int iscsit_send_recovery_r2t_for_snack( + struct iscsit_cmd *cmd, + struct iscsi_r2t *r2t) +{ + /* + * If the struct iscsi_r2t has not been sent yet, we can safely + * ignore retransmission + * of the R2TSN in question. + */ + spin_lock_bh(&cmd->r2t_lock); + if (!r2t->sent_r2t) { + spin_unlock_bh(&cmd->r2t_lock); + return 0; + } + r2t->sent_r2t = 0; + spin_unlock_bh(&cmd->r2t_lock); + + iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T); + + return 0; +} + +static int iscsit_handle_r2t_snack( + struct iscsit_cmd *cmd, + unsigned char *buf, + u32 begrun, + u32 runlength) +{ + u32 last_r2tsn; + struct iscsi_r2t *r2t; + + /* + * Make sure the initiator is not requesting retransmission + * of R2TSNs already acknowledged by a TMR TASK_REASSIGN. 
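+ *
+ * Example (values assumed): BegRun = 2 with RunLength = 3 requests
+ * retransmission of R2TSNs 2, 3 and 4; RunLength = 0 requests every
+ * R2TSN from BegRun up to the current R2TSN. Runs reaching at or
+ * below acked_data_sn, or beyond r2t_sn, are rejected below.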
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, R2T SNACK requesting"
+ " retransmission of R2TSN: 0x%08x to 0x%08x but already"
+ " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+ }
+
+ if (runlength) {
+ if ((begrun + runlength) > cmd->r2t_sn) {
+ pr_err("Command ITT: 0x%08x received R2T SNACK"
+ " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
+ " current R2TSN: 0x%08x, protocol error.\n",
+ cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
+ return iscsit_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_INVALID, buf);
+ }
+ last_r2tsn = (begrun + runlength);
+ } else
+ last_r2tsn = cmd->r2t_sn;
+
+ while (begrun < last_r2tsn) {
+ r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
+ if (!r2t)
+ return -1;
+ if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
+ return -1;
+
+ begrun++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsit_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ u32 data_sn = 0, data_sn_count = 0;
+ u32 pdu_start = 0, seq_no = 0;
+ u32 begrun = dr->begrun;
+ struct iscsit_conn *conn = cmd->conn;
+
+ while (begrun > data_sn++) {
+ data_sn_count++;
+ if ((dr->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ dr->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ dr->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ dr->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ dr->next_burst_len);
+ dr->next_burst_len = 0;
+ pdu_start += data_sn_count;
+ data_sn_count = 0;
+ seq_no++;
+ }
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->seq_no = seq_no;
+ cmd->pdu_start = pdu_start;
+ cmd->pdu_send_order = data_sn_count;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsit_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ int found_seq = 0, i;
+ u32 data_sn, read_data_done = 0, seq_send_order = 0;
+ u32 begrun = dr->begrun;
+ u32 runlength = dr->runlength;
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsi_seq *first_seq = NULL, *seq = NULL;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsit_cmd->seq_list is NULL!\n");
+ return -1;
+ }
+
+ /*
+ * Calculate read_data_done for all sequences containing a
+ * first_datasn and last_datasn less than the BegRun.
+ *
+ * Locate the struct iscsi_seq the BegRun lies within and calculate
+ * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
+ *
+ * Also use struct iscsi_seq->seq_send_order to determine where to start.
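+ *
+ * Worked example (values assumed): three sent sequences covering
+ * DataSN 0-7, 8-15 and 16-23 with BegRun = 10: the first sequence's
+ * whole xfer_len is added to read_data_done, the walk stops inside
+ * the second sequence, and (for DataPDUInOrder=Yes) two
+ * MaxRecvDataSegmentLength-sized steps advance read_data_done and
+ * next_burst_len from DataSN 8 up to DataSN 10.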
+ */
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (!seq->seq_send_order)
+ first_seq = seq;
+
+ /*
+ * No data has been transferred for this DataIN sequence, so the
+ * seq->first_datasn and seq->last_datasn have not been set.
+ */
+ if (!seq->sent) {
+ pr_err("Ignoring non-sent sequence 0x%08x ->"
+ " 0x%08x\n\n", seq->first_datasn,
+ seq->last_datasn);
+ continue;
+ }
+
+ /*
+ * This DataIN sequence precedes the received BegRun: add the
+ * total xfer_len of the sequence to read_data_done and reset
+ * seq->pdu_send_order.
+ */
+ if ((seq->first_datasn < begrun) &&
+ (seq->last_datasn < begrun)) {
+ pr_err("Pre BegRun sequence 0x%08x ->"
+ " 0x%08x\n", seq->first_datasn,
+ seq->last_datasn);
+
+ read_data_done += cmd->seq_list[i].xfer_len;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+
+ /*
+ * The BegRun lies within this DataIN sequence.
+ */
+ if ((seq->first_datasn <= begrun) &&
+ (seq->last_datasn >= begrun)) {
+ pr_err("Found sequence begrun: 0x%08x in"
+ " 0x%08x -> 0x%08x\n", begrun,
+ seq->first_datasn, seq->last_datasn);
+
+ seq_send_order = seq->seq_send_order;
+ data_sn = seq->first_datasn;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ found_seq = 1;
+
+ /*
+ * For DataPDUInOrder=Yes, while the first DataSN of
+ * the sequence is less than the received BegRun, add
+ * the MaxRecvDataSegmentLength to read_data_done and
+ * to the sequence's next_burst_len;
+ *
+ * For DataPDUInOrder=No, while the first DataSN of the
+ * sequence is less than the received BegRun, find the
+ * struct iscsi_pdu of the DataSN in question and add the
+ * MaxRecvDataSegmentLength to read_data_done and to the
+ * sequence's next_burst_len;
+ */
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+ read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ seq->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ data_sn++;
+ }
+ } else {
+ int j;
+ struct iscsi_pdu *pdu;
+
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[
+ seq->pdu_start + j];
+ if (pdu->data_sn == data_sn) {
+ read_data_done +=
+ pdu->length;
+ seq->next_burst_len +=
+ pdu->length;
+ }
+ }
+ data_sn++;
+ }
+ }
+ continue;
+ }
+
+ /*
+ * This DataIN sequence comes after the received BegRun:
+ * reset seq->pdu_send_order and continue.
+ */
+ if ((seq->first_datasn > begrun) ||
+ (seq->last_datasn > begrun)) {
+ pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
+ seq->first_datasn, seq->last_datasn);
+
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+ }
+
+ if (!found_seq) {
+ if (!begrun) {
+ if (!first_seq) {
+ pr_err("ITT: 0x%08x, Begrun: 0x%08x"
+ " but first_seq is NULL\n",
+ cmd->init_task_tag, begrun);
+ return -1;
+ }
+ seq_send_order = first_seq->seq_send_order;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ goto done;
+ }
+
+ pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
+ " BegRun: 0x%08x, RunLength: 0x%08x while"
+ " DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
+ cmd->init_task_tag, begrun, runlength,
+ (conn->sess->sess_ops->DataPDUInOrder) ?
"Yes" : "No"); + return -1; + } + +done: + dr->read_data_done = read_data_done; + dr->seq_send_order = seq_send_order; + + return 0; +} + +static int iscsit_handle_recovery_datain( + struct iscsit_cmd *cmd, + unsigned char *buf, + u32 begrun, + u32 runlength) +{ + struct iscsit_conn *conn = cmd->conn; + struct iscsi_datain_req *dr; + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (!(se_cmd->transport_state & CMD_T_COMPLETE)) { + pr_err("Ignoring ITT: 0x%08x Data SNACK\n", + cmd->init_task_tag); + return 0; + } + + /* + * Make sure the initiator is not requesting retransmission + * of DataSNs already acknowledged by a Data ACK SNACK. + */ + if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) && + (begrun <= cmd->acked_data_sn)) { + pr_err("ITT: 0x%08x, Data SNACK requesting" + " retransmission of DataSN: 0x%08x to 0x%08x but" + " already acked to DataSN: 0x%08x by Data ACK SNACK," + " protocol error.\n", cmd->init_task_tag, begrun, + (begrun + runlength), cmd->acked_data_sn); + + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); + } + + /* + * Make sure BegRun and RunLength in the Data SNACK are sane. + * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent. + */ + if ((begrun + runlength) > (cmd->data_sn - 1)) { + pr_err("Initiator requesting BegRun: 0x%08x, RunLength" + ": 0x%08x greater than maximum DataSN: 0x%08x.\n", + begrun, runlength, (cmd->data_sn - 1)); + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, + buf); + } + + dr = iscsit_allocate_datain_req(); + if (!dr) + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, + buf); + + dr->data_sn = dr->begrun = begrun; + dr->runlength = runlength; + dr->generate_recovery_values = 1; + dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY; + + iscsit_attach_datain_req(cmd, dr); + + cmd->i_state = ISTATE_SEND_DATAIN; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + + return 0; +} + +int iscsit_handle_recovery_datain_or_r2t( + struct iscsit_conn *conn, + unsigned char *buf, + itt_t init_task_tag, + u32 targ_xfer_tag, + u32 begrun, + u32 runlength) +{ + struct iscsit_cmd *cmd; + + cmd = iscsit_find_cmd_from_itt(conn, init_task_tag); + if (!cmd) + return 0; + + /* + * FIXME: This will not work for bidi commands. + */ + switch (cmd->data_direction) { + case DMA_TO_DEVICE: + return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength); + case DMA_FROM_DEVICE: + return iscsit_handle_recovery_datain(cmd, buf, begrun, + runlength); + default: + pr_err("Unknown cmd->data_direction: 0x%02x\n", + cmd->data_direction); + return -1; + } + + return 0; +} + +/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */ +int iscsit_handle_status_snack( + struct iscsit_conn *conn, + itt_t init_task_tag, + u32 targ_xfer_tag, + u32 begrun, + u32 runlength) +{ + struct iscsit_cmd *cmd = NULL; + u32 last_statsn; + int found_cmd; + + if (!begrun) { + begrun = conn->exp_statsn; + } else if (conn->exp_statsn > begrun) { + pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" + " 0x%08x but already got ExpStatSN: 0x%08x on CID:" + " %hu.\n", begrun, runlength, conn->exp_statsn, + conn->cid); + return 0; + } + + last_statsn = (!runlength) ? 
conn->stat_sn : (begrun + runlength);
+
+ while (begrun < last_statsn) {
+ found_cmd = 0;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+ if (cmd->stat_sn == begrun) {
+ found_cmd = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!found_cmd) {
+ pr_err("Unable to find StatSN: 0x%08x for"
+ " a Status SNACK, assuming this was a"
+ " proactive SNACK for an untransmitted"
+ " StatSN, ignoring.\n", begrun);
+ begrun++;
+ continue;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (cmd->i_state == ISTATE_SEND_DATAIN) {
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Ignoring Status SNACK for BegRun:"
+ " 0x%08x, RunLength: 0x%08x, assuming this was"
+ " a proactive SNACK for an untransmitted"
+ " StatSN\n", begrun, runlength);
+ begrun++;
+ continue;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ begrun++;
+ }
+
+ return 0;
+}
+
+int iscsit_handle_data_ack(
+ struct iscsit_conn *conn,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsit_cmd *cmd = NULL;
+
+ cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
+ if (!cmd) {
+ pr_err("Data ACK SNACK for TTT: 0x%08x is"
+ " invalid.\n", targ_xfer_tag);
+ return -1;
+ }
+
+ if (begrun <= cmd->acked_data_sn) {
+ pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
+ " less than the already acked DataSN: 0x%08x.\n",
+ cmd->init_task_tag, begrun, cmd->acked_data_sn);
+ return -1;
+ }
+
+ /*
+ * For Data ACK SNACK, BegRun is the next expected DataSN.
+ * (see iSCSI v19: 10.16.6)
+ */
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (begrun - 1);
+
+ pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
+ " updated acked DataSN to 0x%08x.\n",
+ cmd->init_task_tag, cmd->acked_data_sn);
+
+ return 0;
+}
+
+static int iscsit_send_recovery_r2t(
+ struct iscsit_cmd *cmd,
+ u32 offset,
+ u32 xfer_len)
+{
+ int ret;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return ret;
+}
+
+int iscsit_dataout_datapduinorder_no_fbit(
+ struct iscsit_cmd *cmd,
+ struct iscsi_pdu *pdu)
+{
+ int i, send_recovery_r2t = 0, recovery = 0;
+ u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsi_pdu *first_pdu = NULL;
+
+ /*
+ * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU
+ * count of the DataOUT sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ for (i = 0; i < cmd->pdu_count; i++) {
+ if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
+ if (!first_pdu)
+ first_pdu = &cmd->pdu_list[i];
+ xfer_len += cmd->pdu_list[i].length;
+ pdu_count++;
+ } else if (pdu_count)
+ break;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ first_pdu = &cmd->pdu_list[seq->pdu_start];
+ pdu_count = seq->pdu_count;
+ }
+
+ if (!first_pdu || !pdu_count)
+ return DATAOUT_CANNOT_RECOVER;
+
+ /*
+ * Loop through the ending DataOUT Sequence checking each struct
+ * iscsi_pdu. The following ugly logic batches runs of not-received
+ * PDUs.
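+ *
+ * Example (PDU states assumed): statuses OK, NOT_RECEIVED,
+ * NOT_RECEIVED, OK, NOT_RECEIVED across five PDUs produce two
+ * recovery R2Ts: one spanning the second and third PDUs, flushed
+ * when the fourth is seen OK, and one for the fifth, flushed after
+ * the loop.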
+ */ + for (i = 0; i < pdu_count; i++) { + if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) { + if (!send_recovery_r2t) + continue; + + if (iscsit_send_recovery_r2t(cmd, offset, length) < 0) + return DATAOUT_CANNOT_RECOVER; + + send_recovery_r2t = length = offset = 0; + continue; + } + /* + * Set recovery = 1 for any missing, CRC failed, or timed + * out PDUs to let the DataOUT logic know that this sequence + * has not been completed yet. + * + * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED. + * We assume if the PDU either failed CRC or timed out + * that a Recovery R2T has already been sent. + */ + recovery = 1; + + if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED) + continue; + + if (!offset) + offset = first_pdu[i].offset; + length += first_pdu[i].length; + + send_recovery_r2t = 1; + } + + if (send_recovery_r2t) + if (iscsit_send_recovery_r2t(cmd, offset, length) < 0) + return DATAOUT_CANNOT_RECOVER; + + return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY; +} + +static int iscsit_recalculate_dataout_values( + struct iscsit_cmd *cmd, + u32 pdu_offset, + u32 pdu_length, + u32 *r2t_offset, + u32 *r2t_length) +{ + int i; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_pdu *pdu = NULL; + + if (conn->sess->sess_ops->DataSequenceInOrder) { + cmd->data_sn = 0; + + if (conn->sess->sess_ops->DataPDUInOrder) { + *r2t_offset = cmd->write_data_done; + *r2t_length = (cmd->seq_end_offset - + cmd->write_data_done); + return 0; + } + + *r2t_offset = cmd->seq_start_offset; + *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset); + + for (i = 0; i < cmd->pdu_count; i++) { + pdu = &cmd->pdu_list[i]; + + if (pdu->status != ISCSI_PDU_RECEIVED_OK) + continue; + + if ((pdu->offset >= cmd->seq_start_offset) && + ((pdu->offset + pdu->length) <= + cmd->seq_end_offset)) { + if (!cmd->unsolicited_data) + cmd->next_burst_len -= pdu->length; + else + cmd->first_burst_len -= pdu->length; + + cmd->write_data_done -= pdu->length; + pdu->status = ISCSI_PDU_NOT_RECEIVED; + } + } + } else { + struct iscsi_seq *seq = NULL; + + seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length); + if (!seq) + return -1; + + *r2t_offset = seq->orig_offset; + *r2t_length = seq->xfer_len; + + cmd->write_data_done -= (seq->offset - seq->orig_offset); + if (cmd->immediate_data) + cmd->first_burst_len = cmd->write_data_done; + + seq->data_sn = 0; + seq->offset = seq->orig_offset; + seq->next_burst_len = 0; + seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY; + + if (conn->sess->sess_ops->DataPDUInOrder) + return 0; + + for (i = 0; i < seq->pdu_count; i++) { + pdu = &cmd->pdu_list[i+seq->pdu_start]; + + if (pdu->status != ISCSI_PDU_RECEIVED_OK) + continue; + + pdu->status = ISCSI_PDU_NOT_RECEIVED; + } + } + + return 0; +} + +int iscsit_recover_dataout_sequence( + struct iscsit_cmd *cmd, + u32 pdu_offset, + u32 pdu_length) +{ + u32 r2t_length = 0, r2t_offset = 0; + + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY; + spin_unlock_bh(&cmd->istate_lock); + + if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length, + &r2t_offset, &r2t_length) < 0) + return DATAOUT_CANNOT_RECOVER; + + iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length); + + return DATAOUT_WITHIN_COMMAND_RECOVERY; +} + +static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void) +{ + struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL; + + ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC); + if (!ooo_cmdsn) { + pr_err("Unable to allocate memory for" + " struct iscsi_ooo_cmdsn.\n"); + 
return NULL; + } + INIT_LIST_HEAD(&ooo_cmdsn->ooo_list); + + return ooo_cmdsn; +} + +static int iscsit_attach_ooo_cmdsn( + struct iscsit_session *sess, + struct iscsi_ooo_cmdsn *ooo_cmdsn) +{ + struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp; + + lockdep_assert_held(&sess->cmdsn_mutex); + + /* + * We attach the struct iscsi_ooo_cmdsn entry to the out of order + * list in increasing CmdSN order. + * This allows iscsi_execute_ooo_cmdsns() to detect any + * additional CmdSN holes while performing delayed execution. + */ + if (list_empty(&sess->sess_ooo_cmdsn_list)) + list_add_tail(&ooo_cmdsn->ooo_list, + &sess->sess_ooo_cmdsn_list); + else { + ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev, + typeof(*ooo_tail), ooo_list); + /* + * CmdSN is greater than the tail of the list. + */ + if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn)) + list_add_tail(&ooo_cmdsn->ooo_list, + &sess->sess_ooo_cmdsn_list); + else { + /* + * CmdSN is either lower than the head, or somewhere + * in the middle. + */ + list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, + ooo_list) { + if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn)) + continue; + + /* Insert before this entry */ + list_add(&ooo_cmdsn->ooo_list, + ooo_tmp->ooo_list.prev); + break; + } + } + } + + return 0; +} + +/* + * Removes an struct iscsi_ooo_cmdsn from a session's list, + * called with struct iscsit_session->cmdsn_mutex held. + */ +void iscsit_remove_ooo_cmdsn( + struct iscsit_session *sess, + struct iscsi_ooo_cmdsn *ooo_cmdsn) +{ + list_del(&ooo_cmdsn->ooo_list); + kmem_cache_free(lio_ooo_cache, ooo_cmdsn); +} + +void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *conn) +{ + struct iscsi_ooo_cmdsn *ooo_cmdsn; + struct iscsit_session *sess = conn->sess; + + mutex_lock(&sess->cmdsn_mutex); + list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) { + if (ooo_cmdsn->cid != conn->cid) + continue; + + ooo_cmdsn->cmd = NULL; + } + mutex_unlock(&sess->cmdsn_mutex); +} + +int iscsit_execute_ooo_cmdsns(struct iscsit_session *sess) +{ + int ooo_count = 0; + struct iscsit_cmd *cmd = NULL; + struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp; + + lockdep_assert_held(&sess->cmdsn_mutex); + + list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp, + &sess->sess_ooo_cmdsn_list, ooo_list) { + if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn) + continue; + + if (!ooo_cmdsn->cmd) { + sess->exp_cmd_sn++; + iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn); + continue; + } + + cmd = ooo_cmdsn->cmd; + cmd->i_state = cmd->deferred_i_state; + ooo_count++; + sess->exp_cmd_sn++; + pr_debug("Executing out of order CmdSN: 0x%08x," + " incremented ExpCmdSN to 0x%08x.\n", + cmd->cmd_sn, sess->exp_cmd_sn); + + iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn); + + if (iscsit_execute_cmd(cmd, 1) < 0) + return -1; + } + + return ooo_count; +} + +/* + * Called either: + * + * 1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns() + * or iscsi_check_received_cmdsn(). + * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions + * for immediate commands. + */ +int iscsit_execute_cmd(struct iscsit_cmd *cmd, int ooo) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct iscsit_conn *conn = cmd->conn; + int lr = 0; + + spin_lock_bh(&cmd->istate_lock); + if (ooo) + cmd->cmd_flags &= ~ICF_OOO_CMDSN; + + switch (cmd->iscsi_opcode) { + case ISCSI_OP_SCSI_CMD: + /* + * Go ahead and send the CHECK_CONDITION status for + * any SCSI CDB exceptions that may have occurred. 
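+ *
+ * TCM_RESERVATION_CONFLICT is queued as a normal status response;
+ * any other sense_reason is returned as a CHECK_CONDITION with
+ * sense data, unless the command has already been aborted.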
+ */ + if (cmd->sense_reason) { + if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) { + cmd->i_state = ISTATE_SEND_STATUS; + spin_unlock_bh(&cmd->istate_lock); + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, + cmd->i_state); + return 0; + } + spin_unlock_bh(&cmd->istate_lock); + if (cmd->se_cmd.transport_state & CMD_T_ABORTED) + return 0; + return transport_send_check_condition_and_sense(se_cmd, + cmd->sense_reason, 0); + } + /* + * Special case for delayed CmdSN with Immediate + * Data and/or Unsolicited Data Out attached. + */ + if (cmd->immediate_data) { + if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { + spin_unlock_bh(&cmd->istate_lock); + target_execute_cmd(&cmd->se_cmd); + return 0; + } + spin_unlock_bh(&cmd->istate_lock); + + if (!(cmd->cmd_flags & + ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) { + if (cmd->se_cmd.transport_state & CMD_T_ABORTED) + return 0; + + iscsit_set_dataout_sequence_values(cmd); + conn->conn_transport->iscsit_get_dataout(conn, cmd, false); + } + return 0; + } + /* + * The default handler. + */ + spin_unlock_bh(&cmd->istate_lock); + + if ((cmd->data_direction == DMA_TO_DEVICE) && + !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) { + if (cmd->se_cmd.transport_state & CMD_T_ABORTED) + return 0; + + iscsit_set_unsolicited_dataout(cmd); + } + return transport_handle_cdb_direct(&cmd->se_cmd); + + case ISCSI_OP_NOOP_OUT: + case ISCSI_OP_TEXT: + spin_unlock_bh(&cmd->istate_lock); + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + break; + case ISCSI_OP_SCSI_TMFUNC: + if (cmd->se_cmd.se_tmr_req->response) { + spin_unlock_bh(&cmd->istate_lock); + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, + cmd->i_state); + return 0; + } + spin_unlock_bh(&cmd->istate_lock); + + return transport_generic_handle_tmr(&cmd->se_cmd); + case ISCSI_OP_LOGOUT: + spin_unlock_bh(&cmd->istate_lock); + switch (cmd->logout_reason) { + case ISCSI_LOGOUT_REASON_CLOSE_SESSION: + lr = iscsit_logout_closesession(cmd, cmd->conn); + break; + case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: + lr = iscsit_logout_closeconnection(cmd, cmd->conn); + break; + case ISCSI_LOGOUT_REASON_RECOVERY: + lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn); + break; + default: + pr_err("Unknown iSCSI Logout Request Code:" + " 0x%02x\n", cmd->logout_reason); + return -1; + } + + return lr; + default: + spin_unlock_bh(&cmd->istate_lock); + pr_err("Cannot perform out of order execution for" + " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode); + return -1; + } + + return 0; +} + +void iscsit_free_all_ooo_cmdsns(struct iscsit_session *sess) +{ + struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp; + + mutex_lock(&sess->cmdsn_mutex); + list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp, + &sess->sess_ooo_cmdsn_list, ooo_list) { + + list_del(&ooo_cmdsn->ooo_list); + kmem_cache_free(lio_ooo_cache, ooo_cmdsn); + } + mutex_unlock(&sess->cmdsn_mutex); +} + +int iscsit_handle_ooo_cmdsn( + struct iscsit_session *sess, + struct iscsit_cmd *cmd, + u32 cmdsn) +{ + int batch = 0; + struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL; + + cmd->deferred_i_state = cmd->i_state; + cmd->i_state = ISTATE_DEFERRED_CMD; + cmd->cmd_flags |= ICF_OOO_CMDSN; + + if (list_empty(&sess->sess_ooo_cmdsn_list)) + batch = 1; + else { + ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev, + typeof(*ooo_tail), ooo_list); + if (ooo_tail->cmdsn != (cmdsn - 1)) + batch = 1; + } + + ooo_cmdsn = iscsit_allocate_ooo_cmdsn(); + if (!ooo_cmdsn) + return -ENOMEM; + + ooo_cmdsn->cmd = cmd; + ooo_cmdsn->batch_count = (batch) ? 
+ (cmdsn - sess->exp_cmd_sn) : 1; + ooo_cmdsn->cid = cmd->conn->cid; + ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn; + ooo_cmdsn->cmdsn = cmdsn; + + if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) { + kmem_cache_free(lio_ooo_cache, ooo_cmdsn); + return -ENOMEM; + } + + return 0; +} + +static int iscsit_set_dataout_timeout_values( + struct iscsit_cmd *cmd, + u32 *offset, + u32 *length) +{ + struct iscsit_conn *conn = cmd->conn; + struct iscsi_r2t *r2t; + + if (cmd->unsolicited_data) { + *offset = 0; + *length = (conn->sess->sess_ops->FirstBurstLength > + cmd->se_cmd.data_length) ? + cmd->se_cmd.data_length : + conn->sess->sess_ops->FirstBurstLength; + return 0; + } + + spin_lock_bh(&cmd->r2t_lock); + if (list_empty(&cmd->cmd_r2t_list)) { + pr_err("cmd->cmd_r2t_list is empty!\n"); + spin_unlock_bh(&cmd->r2t_lock); + return -1; + } + + list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { + if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) { + *offset = r2t->offset; + *length = r2t->xfer_len; + spin_unlock_bh(&cmd->r2t_lock); + return 0; + } + } + spin_unlock_bh(&cmd->r2t_lock); + + pr_err("Unable to locate any incomplete DataOUT" + " sequences for ITT: 0x%08x.\n", cmd->init_task_tag); + + return -1; +} + +/* + * NOTE: Called from interrupt (timer) context. + */ +void iscsit_handle_dataout_timeout(struct timer_list *t) +{ + u32 pdu_length = 0, pdu_offset = 0; + u32 r2t_length = 0, r2t_offset = 0; + struct iscsit_cmd *cmd = from_timer(cmd, t, dataout_timer); + struct iscsit_conn *conn = cmd->conn; + struct iscsit_session *sess = NULL; + struct iscsi_node_attrib *na; + + iscsit_inc_conn_usage_count(conn); + + spin_lock_bh(&cmd->dataout_timeout_lock); + if (cmd->dataout_timer_flags & ISCSI_TF_STOP) { + spin_unlock_bh(&cmd->dataout_timeout_lock); + iscsit_dec_conn_usage_count(conn); + return; + } + cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING; + sess = conn->sess; + na = iscsit_tpg_get_node_attrib(sess); + + if (!sess->sess_ops->ErrorRecoveryLevel) { + pr_err("Unable to recover from DataOut timeout while" + " in ERL=0, closing iSCSI connection for I_T Nexus" + " %s,i,0x%6phN,%s,t,0x%02x\n", + sess->sess_ops->InitiatorName, sess->isid, + sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt); + goto failure; + } + + if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) { + pr_err("Command ITT: 0x%08x exceeded max retries" + " for DataOUT timeout %u, closing iSCSI connection for" + " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n", + cmd->init_task_tag, na->dataout_timeout_retries, + sess->sess_ops->InitiatorName, sess->isid, + sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt); + goto failure; + } + + cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY; + + if (conn->sess->sess_ops->DataSequenceInOrder) { + if (conn->sess->sess_ops->DataPDUInOrder) { + pdu_offset = cmd->write_data_done; + if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len)) > cmd->se_cmd.data_length) + pdu_length = (cmd->se_cmd.data_length - + cmd->write_data_done); + else + pdu_length = (conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len); + } else { + pdu_offset = cmd->seq_start_offset; + pdu_length = (cmd->seq_end_offset - + cmd->seq_start_offset); + } + } else { + if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset, + &pdu_length) < 0) + goto failure; + } + + if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length, + &r2t_offset, &r2t_length) < 0) + goto failure; + + pr_debug("Command ITT: 0x%08x timed out waiting for" + " completion of %sDataOUT Sequence Offset: %u, 
Length: %u\n", + cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " : + "", r2t_offset, r2t_length); + + if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0) + goto failure; + + iscsit_start_dataout_timer(cmd, conn); + spin_unlock_bh(&cmd->dataout_timeout_lock); + iscsit_dec_conn_usage_count(conn); + + return; + +failure: + spin_unlock_bh(&cmd->dataout_timeout_lock); + iscsit_fill_cxn_timeout_err_stats(sess); + iscsit_cause_connection_reinstatement(conn, 0); + iscsit_dec_conn_usage_count(conn); +} + +void iscsit_mod_dataout_timer(struct iscsit_cmd *cmd) +{ + struct iscsit_conn *conn = cmd->conn; + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + spin_lock_bh(&cmd->dataout_timeout_lock); + if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { + spin_unlock_bh(&cmd->dataout_timeout_lock); + return; + } + + mod_timer(&cmd->dataout_timer, + (get_jiffies_64() + na->dataout_timeout * HZ)); + pr_debug("Updated DataOUT timer for ITT: 0x%08x", + cmd->init_task_tag); + spin_unlock_bh(&cmd->dataout_timeout_lock); +} + +void iscsit_start_dataout_timer( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + lockdep_assert_held(&cmd->dataout_timeout_lock); + + if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING) + return; + + pr_debug("Starting DataOUT timer for ITT: 0x%08x on" + " CID: %hu.\n", cmd->init_task_tag, conn->cid); + + cmd->dataout_timer_flags &= ~ISCSI_TF_STOP; + cmd->dataout_timer_flags |= ISCSI_TF_RUNNING; + mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ); +} + +void iscsit_stop_dataout_timer(struct iscsit_cmd *cmd) +{ + spin_lock_bh(&cmd->dataout_timeout_lock); + if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { + spin_unlock_bh(&cmd->dataout_timeout_lock); + return; + } + cmd->dataout_timer_flags |= ISCSI_TF_STOP; + spin_unlock_bh(&cmd->dataout_timeout_lock); + + del_timer_sync(&cmd->dataout_timer); + + spin_lock_bh(&cmd->dataout_timeout_lock); + cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING; + pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n", + cmd->init_task_tag); + spin_unlock_bh(&cmd->dataout_timeout_lock); +} +EXPORT_SYMBOL(iscsit_stop_dataout_timer); diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h new file mode 100644 index 0000000000..12472eefe5 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_erl1.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_ERL1_H +#define ISCSI_TARGET_ERL1_H + +#include <linux/types.h> +#include <scsi/iscsi_proto.h> /* itt_t */ + +struct iscsit_cmd; +struct iscsit_conn; +struct iscsi_datain_req; +struct iscsi_ooo_cmdsn; +struct iscsi_pdu; +struct iscsit_session; + +extern int iscsit_dump_data_payload(struct iscsit_conn *, u32, int); +extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes( + struct iscsit_cmd *, struct iscsi_datain_req *); +extern int iscsit_create_recovery_datain_values_datasequenceinorder_no( + struct iscsit_cmd *, struct iscsi_datain_req *); +extern int iscsit_handle_recovery_datain_or_r2t(struct iscsit_conn *, unsigned char *, + itt_t, u32, u32, u32); +extern int iscsit_handle_status_snack(struct iscsit_conn *, itt_t, u32, + u32, u32); +extern int iscsit_handle_data_ack(struct iscsit_conn *, u32, u32, u32); +extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsit_cmd *, struct iscsi_pdu *); +extern int 
iscsit_recover_dataout_sequence(struct iscsit_cmd *, u32, u32); +extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *); +extern void iscsit_free_all_ooo_cmdsns(struct iscsit_session *); +extern int iscsit_execute_ooo_cmdsns(struct iscsit_session *); +extern int iscsit_execute_cmd(struct iscsit_cmd *, int); +extern int iscsit_handle_ooo_cmdsn(struct iscsit_session *, struct iscsit_cmd *, u32); +extern void iscsit_remove_ooo_cmdsn(struct iscsit_session *, struct iscsi_ooo_cmdsn *); +extern void iscsit_handle_dataout_timeout(struct timer_list *t); +extern void iscsit_mod_dataout_timer(struct iscsit_cmd *); +extern void iscsit_start_dataout_timer(struct iscsit_cmd *, struct iscsit_conn *); +extern void iscsit_stop_dataout_timer(struct iscsit_cmd *); + +#endif /* ISCSI_TARGET_ERL1_H */ diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c new file mode 100644 index 0000000000..18e88d2ea5 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains error recovery level two functions used by + * the iSCSI Target driver. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_datain_values.h" +#include "iscsi_target_util.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_erl2.h" +#include "iscsi_target.h" + +/* + * FIXME: Does RData SNACK apply here as well? 
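+ *
+ * (For now only ExpDataSN-based recovery is done here: the loop
+ * below walks MaxRecvDataSegmentLength-sized PDUs within each
+ * MaxBurstLength burst to rebuild read_data_done and
+ * next_burst_len from the initiator's acknowledged ExpDataSN.)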
+ */ +void iscsit_create_conn_recovery_datain_values( + struct iscsit_cmd *cmd, + __be32 exp_data_sn) +{ + u32 data_sn = 0; + struct iscsit_conn *conn = cmd->conn; + + cmd->next_burst_len = 0; + cmd->read_data_done = 0; + + while (be32_to_cpu(exp_data_sn) > data_sn) { + if ((cmd->next_burst_len + + conn->conn_ops->MaxRecvDataSegmentLength) < + conn->sess->sess_ops->MaxBurstLength) { + cmd->read_data_done += + conn->conn_ops->MaxRecvDataSegmentLength; + cmd->next_burst_len += + conn->conn_ops->MaxRecvDataSegmentLength; + } else { + cmd->read_data_done += + (conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len); + cmd->next_burst_len = 0; + } + data_sn++; + } +} + +void iscsit_create_conn_recovery_dataout_values( + struct iscsit_cmd *cmd) +{ + u32 write_data_done = 0; + struct iscsit_conn *conn = cmd->conn; + + cmd->data_sn = 0; + cmd->next_burst_len = 0; + + while (cmd->write_data_done > write_data_done) { + if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <= + cmd->write_data_done) + write_data_done += conn->sess->sess_ops->MaxBurstLength; + else + break; + } + + cmd->write_data_done = write_data_done; +} + +static int iscsit_attach_active_connection_recovery_entry( + struct iscsit_session *sess, + struct iscsi_conn_recovery *cr) +{ + spin_lock(&sess->cr_a_lock); + list_add_tail(&cr->cr_list, &sess->cr_active_list); + spin_unlock(&sess->cr_a_lock); + + return 0; +} + +static int iscsit_attach_inactive_connection_recovery_entry( + struct iscsit_session *sess, + struct iscsi_conn_recovery *cr) +{ + spin_lock(&sess->cr_i_lock); + list_add_tail(&cr->cr_list, &sess->cr_inactive_list); + + sess->conn_recovery_count++; + pr_debug("Incremented connection recovery count to %u for" + " SID: %u\n", sess->conn_recovery_count, sess->sid); + spin_unlock(&sess->cr_i_lock); + + return 0; +} + +struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry( + struct iscsit_session *sess, + u16 cid) +{ + struct iscsi_conn_recovery *cr; + + spin_lock(&sess->cr_i_lock); + list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) { + if (cr->cid == cid) { + spin_unlock(&sess->cr_i_lock); + return cr; + } + } + spin_unlock(&sess->cr_i_lock); + + return NULL; +} + +void iscsit_free_connection_recovery_entries(struct iscsit_session *sess) +{ + struct iscsit_cmd *cmd, *cmd_tmp; + struct iscsi_conn_recovery *cr, *cr_tmp; + + spin_lock(&sess->cr_a_lock); + list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) { + list_del(&cr->cr_list); + spin_unlock(&sess->cr_a_lock); + + spin_lock(&cr->conn_recovery_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, + &cr->conn_recovery_cmd_list, i_conn_node) { + + list_del_init(&cmd->i_conn_node); + cmd->conn = NULL; + spin_unlock(&cr->conn_recovery_cmd_lock); + iscsit_free_cmd(cmd, true); + spin_lock(&cr->conn_recovery_cmd_lock); + } + spin_unlock(&cr->conn_recovery_cmd_lock); + spin_lock(&sess->cr_a_lock); + + kfree(cr); + } + spin_unlock(&sess->cr_a_lock); + + spin_lock(&sess->cr_i_lock); + list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) { + list_del(&cr->cr_list); + spin_unlock(&sess->cr_i_lock); + + spin_lock(&cr->conn_recovery_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, + &cr->conn_recovery_cmd_list, i_conn_node) { + + list_del_init(&cmd->i_conn_node); + cmd->conn = NULL; + spin_unlock(&cr->conn_recovery_cmd_lock); + iscsit_free_cmd(cmd, true); + spin_lock(&cr->conn_recovery_cmd_lock); + } + spin_unlock(&cr->conn_recovery_cmd_lock); + spin_lock(&sess->cr_i_lock); + + kfree(cr); + } + 
spin_unlock(&sess->cr_i_lock);
+}
+
+int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsit_session *sess)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_del(&cr->cr_list);
+
+ sess->conn_recovery_count--;
+ pr_debug("Decremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_a_lock);
+
+ kfree(cr);
+
+ return 0;
+}
+
+static void iscsit_remove_inactive_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsit_session *sess)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+}
+
+/*
+ * Called with cr->conn_recovery_cmd_lock held.
+ */
+int iscsit_remove_cmd_from_connection_recovery(
+ struct iscsit_cmd *cmd,
+ struct iscsit_session *sess)
+{
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ BUG();
+ }
+ cr = cmd->cr;
+
+ list_del_init(&cmd->i_conn_node);
+ return --cr->cmd_count;
+}
+
+void iscsit_discard_cr_cmds_by_expstatsn(
+ struct iscsi_conn_recovery *cr,
+ u32 exp_statsn)
+{
+ u32 dropped_count = 0;
+ struct iscsit_cmd *cmd, *cmd_tmp;
+ struct iscsit_session *sess = cr->sess;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_conn_node) {
+
+ if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
+ (cmd->deferred_i_state != ISTATE_REMOVE)) ||
+ (cmd->stat_sn >= exp_statsn)) {
+ continue;
+ }
+
+ dropped_count++;
+ pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
+ " 0x%08x, CID: %hu.\n", cmd->init_task_tag,
+ cmd->stat_sn, cr->cid);
+
+ iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ iscsit_free_cmd(cmd, true);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ pr_debug("Dropped %u total acknowledged commands on"
+ " CID: %hu less than old ExpStatSN: 0x%08x\n",
+ dropped_count, cr->cid, exp_statsn);
+
+ if (!cr->cmd_count) {
+ pr_debug("No commands to be reassigned for failed"
+ " connection CID: %hu on SID: %u\n",
+ cr->cid, sess->sid);
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ } else {
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ }
+}
+
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
+{
+ u32 dropped_count = 0;
+ struct iscsit_cmd *cmd, *cmd_tmp;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+ struct iscsit_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ dropped_count++;
+ pr_debug("Dropping unacknowledged CmdSN:"
+ " 0x%08x during connection recovery on CID: %hu\n",
+ ooo_cmdsn->cmdsn, conn->cid);
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+ continue;
+
+ list_del_init(&cmd->i_conn_node);
+
+ spin_unlock_bh(&conn->cmd_lock);
+ 
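/*
+ * cmd_lock is dropped here because iscsit_free_cmd() may sleep;
+ * it is re-taken before the list walk continues.
+ */
+ 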
iscsit_free_cmd(cmd, true); + spin_lock_bh(&conn->cmd_lock); + } + spin_unlock_bh(&conn->cmd_lock); + + pr_debug("Dropped %u total unacknowledged commands on CID:" + " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid, + sess->exp_cmd_sn); + return 0; +} + +int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *conn) +{ + u32 cmd_count = 0; + struct iscsit_cmd *cmd, *cmd_tmp; + struct iscsi_conn_recovery *cr; + + /* + * Allocate an struct iscsi_conn_recovery for this connection. + * Each struct iscsit_cmd contains an struct iscsi_conn_recovery pointer + * (struct iscsit_cmd->cr) so we need to allocate this before preparing the + * connection's command list for connection recovery. + */ + cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL); + if (!cr) { + pr_err("Unable to allocate memory for" + " struct iscsi_conn_recovery.\n"); + return -1; + } + INIT_LIST_HEAD(&cr->cr_list); + INIT_LIST_HEAD(&cr->conn_recovery_cmd_list); + spin_lock_init(&cr->conn_recovery_cmd_lock); + /* + * Only perform connection recovery on ISCSI_OP_SCSI_CMD or + * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call + * list_del_init(&cmd->i_conn_node); to release the command to the + * session pool and remove it from the connection's list. + * + * Also stop the DataOUT timer, which will be restarted after + * sending the TMR response. + */ + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { + + if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && + (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { + pr_debug("Not performing reallegiance on" + " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x," + " CID: %hu\n", cmd->iscsi_opcode, + cmd->init_task_tag, cmd->cmd_sn, conn->cid); + + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + iscsit_free_cmd(cmd, true); + spin_lock_bh(&conn->cmd_lock); + continue; + } + + /* + * Special case where commands greater than or equal to + * the session's ExpCmdSN are attached to the connection + * list but not to the out of order CmdSN list. The one + * obvious case is when a command with immediate data + * attached must only check the CmdSN against ExpCmdSN + * after the data is received. The special case below + * is when the connection fails before data is received, + * but also may apply to other PDUs, so it has been + * made generic here. 
+ */ + if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && + iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + iscsit_free_cmd(cmd, true); + spin_lock_bh(&conn->cmd_lock); + continue; + } + + cmd_count++; + pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x," + " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for" + " reallegiance.\n", cmd->iscsi_opcode, + cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn, + conn->cid); + + cmd->deferred_i_state = cmd->i_state; + cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY; + + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); + + cmd->sess = conn->sess; + + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_free_all_datain_reqs(cmd); + + transport_wait_for_tasks(&cmd->se_cmd); + /* + * Add the struct iscsit_cmd to the connection recovery cmd list + */ + spin_lock(&cr->conn_recovery_cmd_lock); + list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list); + spin_unlock(&cr->conn_recovery_cmd_lock); + + spin_lock_bh(&conn->cmd_lock); + cmd->cr = cr; + cmd->conn = NULL; + } + spin_unlock_bh(&conn->cmd_lock); + /* + * Fill in the various values in the preallocated struct iscsi_conn_recovery. + */ + cr->cid = conn->cid; + cr->cmd_count = cmd_count; + cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength; + cr->maxxmitdatasegmentlength = conn->conn_ops->MaxXmitDataSegmentLength; + cr->sess = conn->sess; + + iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr); + + return 0; +} + +int iscsit_connection_recovery_transport_reset(struct iscsit_conn *conn) +{ + atomic_set(&conn->connection_recovery, 1); + + if (iscsit_close_connection(conn) < 0) + return -1; + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h new file mode 100644 index 0000000000..6655e4bcf8 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_erl2.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_ERL2_H +#define ISCSI_TARGET_ERL2_H + +#include <linux/types.h> + +struct iscsit_cmd; +struct iscsit_conn; +struct iscsi_conn_recovery; +struct iscsit_session; + +extern void iscsit_create_conn_recovery_datain_values(struct iscsit_cmd *, __be32); +extern void iscsit_create_conn_recovery_dataout_values(struct iscsit_cmd *); +extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry( + struct iscsit_session *, u16); +extern void iscsit_free_connection_recovery_entries(struct iscsit_session *); +extern int iscsit_remove_active_connection_recovery_entry( + struct iscsi_conn_recovery *, struct iscsit_session *); +extern int iscsit_remove_cmd_from_connection_recovery(struct iscsit_cmd *, + struct iscsit_session *); +extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32); +extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *); +extern int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *); +extern int iscsit_connection_recovery_transport_reset(struct iscsit_conn *); + +#endif /*** ISCSI_TARGET_ERL2_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c new file mode 100644 index 0000000000..90b870f234 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -0,0 +1,1428 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file 
contains the login functions used by the iSCSI Target driver.
+ *
+ * (c) Copyright 2007-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ ******************************************************************************/
+
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/idr.h>
+#include <linux/tcp.h> /* TCP_NODELAY */
+#include <net/ip.h>
+#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
+#include "iscsi_target_device.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+#include <target/iscsi/iscsi_transport.h>
+
+static struct iscsi_login *iscsi_login_init_conn(struct iscsit_conn *conn)
+{
+ struct iscsi_login *login;
+
+ login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+ if (!login) {
+ pr_err("Unable to allocate memory for struct iscsi_login.\n");
+ return NULL;
+ }
+ conn->login = login;
+ login->conn = conn;
+ login->first_request = 1;
+
+ login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->req_buf) {
+ pr_err("Unable to allocate memory for request buffer.\n");
+ goto out_login;
+ }
+
+ login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->rsp_buf) {
+ pr_err("Unable to allocate memory for response buffer.\n");
+ goto out_req_buf;
+ }
+
+ conn->conn_login = login;
+
+ return login;
+
+out_req_buf:
+ kfree(login->req_buf);
+out_login:
+ kfree(login);
+ return NULL;
+}
+
+/*
+ * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
+ * per struct iscsit_conn libcrypto contexts for crc32c and crc32-intel
+ */
+int iscsi_login_setup_crypto(struct iscsit_conn *conn)
+{
+ struct crypto_ahash *tfm;
+
+ /*
+ * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
+ * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
+ * to software 1x8 byte slicing from crc32c.ko
+ */
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("crypto_alloc_ahash() failed\n");
+ return -ENOMEM;
+ }
+
+ conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!conn->conn_rx_hash) {
+ pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+ }
+ ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
+
+ conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!conn->conn_tx_hash) {
+ pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
+ ahash_request_free(conn->conn_rx_hash);
+ conn->conn_rx_hash = NULL;
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+ }
+ ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
+
+ return 0;
+}
+
+static int iscsi_login_check_initiator_version(
+ struct iscsit_conn *conn,
+ u8 version_max,
+ u8 version_min)
+{
+ if ((version_max != 0x00) || (version_min != 0x00)) {
+ pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
+ " version Min/Max 0x%02x/0x%02x, rejecting login.\n",
+ version_min, version_max);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_VERSION);
+ return -1;
+ }
+
+ return 0;
+}
+
+int 
iscsi_check_for_session_reinstatement(struct iscsit_conn *conn) +{ + int sessiontype; + struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL; + struct iscsi_portal_group *tpg = conn->tpg; + struct iscsit_session *sess = NULL, *sess_p = NULL; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + struct se_session *se_sess, *se_sess_tmp; + + initiatorname_param = iscsi_find_param_from_key( + INITIATORNAME, conn->param_list); + sessiontype_param = iscsi_find_param_from_key( + SESSIONTYPE, conn->param_list); + if (!initiatorname_param || !sessiontype_param) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_MISSING_FIELDS); + return -1; + } + + sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0; + + spin_lock_bh(&se_tpg->session_lock); + list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, + sess_list) { + + sess_p = se_sess->fabric_sess_ptr; + spin_lock(&sess_p->conn_lock); + if (atomic_read(&sess_p->session_fall_back_to_erl0) || + atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || + (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { + spin_unlock(&sess_p->conn_lock); + continue; + } + if (!memcmp(sess_p->isid, conn->sess->isid, 6) && + (!strcmp(sess_p->sess_ops->InitiatorName, + initiatorname_param->value) && + (sess_p->sess_ops->SessionType == sessiontype))) { + atomic_set(&sess_p->session_reinstatement, 1); + atomic_set(&sess_p->session_fall_back_to_erl0, 1); + atomic_set(&sess_p->session_close, 1); + spin_unlock(&sess_p->conn_lock); + iscsit_inc_session_usage_count(sess_p); + iscsit_stop_time2retain_timer(sess_p); + sess = sess_p; + break; + } + spin_unlock(&sess_p->conn_lock); + } + spin_unlock_bh(&se_tpg->session_lock); + /* + * If the Time2Retain handler has expired, the session is already gone. + */ + if (!sess) + return 0; + + pr_debug("%s iSCSI Session SID %u is still active for %s," + " performing session reinstatement.\n", (sessiontype) ? + "Discovery" : "Normal", sess->sid, + sess->sess_ops->InitiatorName); + + spin_lock_bh(&sess->conn_lock); + if (sess->session_state == TARG_SESS_STATE_FAILED) { + spin_unlock_bh(&sess->conn_lock); + iscsit_dec_session_usage_count(sess); + return 0; + } + spin_unlock_bh(&sess->conn_lock); + + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); + + return 0; +} + +static int iscsi_login_set_conn_values( + struct iscsit_session *sess, + struct iscsit_conn *conn, + __be16 cid) +{ + int ret; + conn->sess = sess; + conn->cid = be16_to_cpu(cid); + /* + * Generate a random Status sequence number (statsn) for the new + * iSCSI connection. + */ + ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32)); + if (unlikely(ret)) + return ret; + + mutex_lock(&auth_id_lock); + conn->auth_id = iscsit_global->auth_id++; + mutex_unlock(&auth_id_lock); + return 0; +} + +__printf(2, 3) int iscsi_change_param_sprintf( + struct iscsit_conn *conn, + const char *fmt, ...) +{ + va_list args; + unsigned char buf[64]; + + memset(buf, 0, sizeof buf); + + va_start(args, fmt); + vsnprintf(buf, sizeof buf, fmt, args); + va_end(args); + + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(iscsi_change_param_sprintf); + +/* + * This is the leading connection of a new session, + * or session reinstatement. 
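+ *
+ * (Reached when the Login PDU carries TSIH == 0; see the zero_tsih
+ * handling in __iscsi_target_login_thread().)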
+ */ +static int iscsi_login_zero_tsih_s1( + struct iscsit_conn *conn, + unsigned char *buf) +{ + struct iscsit_session *sess = NULL; + struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + int ret; + + sess = kzalloc(sizeof(struct iscsit_session), GFP_KERNEL); + if (!sess) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + pr_err("Could not allocate memory for session\n"); + return -ENOMEM; + } + + if (iscsi_login_set_conn_values(sess, conn, pdu->cid)) + goto free_sess; + + sess->init_task_tag = pdu->itt; + memcpy(&sess->isid, pdu->isid, 6); + sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); + INIT_LIST_HEAD(&sess->sess_conn_list); + INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list); + INIT_LIST_HEAD(&sess->cr_active_list); + INIT_LIST_HEAD(&sess->cr_inactive_list); + init_completion(&sess->async_msg_comp); + init_completion(&sess->reinstatement_comp); + init_completion(&sess->session_wait_comp); + init_completion(&sess->session_waiting_on_uc_comp); + mutex_init(&sess->cmdsn_mutex); + spin_lock_init(&sess->conn_lock); + spin_lock_init(&sess->cr_a_lock); + spin_lock_init(&sess->cr_i_lock); + spin_lock_init(&sess->session_usage_lock); + spin_lock_init(&sess->ttt_lock); + + timer_setup(&sess->time2retain_timer, + iscsit_handle_time2retain_timeout, 0); + + ret = ida_alloc(&sess_ida, GFP_KERNEL); + if (ret < 0) { + pr_err("Session ID allocation failed %d\n", ret); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + goto free_sess; + } + + sess->session_index = ret; + sess->creation_time = get_jiffies_64(); + /* + * The FFP CmdSN window values will be allocated from the TPG's + * Initiator Node's ACL once the login has been successfully completed. + */ + atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn)); + + sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL); + if (!sess->sess_ops) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + pr_err("Unable to allocate memory for" + " struct iscsi_sess_ops.\n"); + goto free_id; + } + + sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL); + if (IS_ERR(sess->se_sess)) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + goto free_ops; + } + + return 0; + +free_ops: + kfree(sess->sess_ops); +free_id: + ida_free(&sess_ida, sess->session_index); +free_sess: + kfree(sess); + conn->sess = NULL; + return -ENOMEM; +} + +static int iscsi_login_zero_tsih_s2( + struct iscsit_conn *conn) +{ + struct iscsi_node_attrib *na; + struct iscsit_session *sess = conn->sess; + struct iscsi_param *param; + bool iser = false; + + sess->tpg = conn->tpg; + + /* + * Assign a new TPG Session Handle. Note this is protected with + * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). + */ + sess->tsih = ++sess->tpg->ntsih; + if (!sess->tsih) + sess->tsih = ++sess->tpg->ntsih; + + /* + * Create the default params from user defined values.. 
+ */ + if (iscsi_copy_param_list(&conn->param_list, + conn->tpg->param_list, 1) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) + iser = true; + + iscsi_set_keys_to_negotiate(conn->param_list, iser); + + if (sess->sess_ops->SessionType) + return iscsi_set_keys_irrelevant_for_discovery( + conn->param_list); + + na = iscsit_tpg_get_node_attrib(sess); + + /* + * If ACL allows non-authorized access in TPG with CHAP, + * then set None to AuthMethod. + */ + param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list); + if (param && !strstr(param->value, NONE)) { + if (!iscsi_conn_auth_required(conn)) + if (iscsi_change_param_sprintf(conn, "AuthMethod=%s", + NONE)) + return -1; + } + + /* + * Need to send TargetPortalGroupTag back in first login response + * on any iSCSI connection where the Initiator provides TargetName. + * See 5.3.1. Login Phase Start + * + * In our case, we have already located the struct iscsi_tiqn at this point. + */ + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) + return -1; + + /* + * Workaround for Initiators that have broken connection recovery logic. + * + * "We would really like to get rid of this." Linux-iSCSI.org team + */ + if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) + return -1; + + /* + * Set RDMAExtensions=Yes by default for iSER enabled network portals + */ + if (iser) { + struct iscsi_param *param; + unsigned long mrdsl, off; + int rc; + + if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes")) + return -1; + + /* + * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for + * Immediate Data + Unsolicited Data-OUT if necessary.. + */ + param = iscsi_find_param_from_key("MaxRecvDataSegmentLength", + conn->param_list); + if (!param) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + rc = kstrtoul(param->value, 0, &mrdsl); + if (rc < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + off = mrdsl % PAGE_SIZE; + if (!off) + goto check_prot; + + if (mrdsl < PAGE_SIZE) + mrdsl = PAGE_SIZE; + else + mrdsl -= off; + + pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" + " to PAGE_SIZE\n", mrdsl); + + if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl)) + return -1; + /* + * ISER currently requires that ImmediateData + Unsolicited + * Data be disabled when protection / signature MRs are enabled. + */ +check_prot: + if (sess->se_sess->sup_prot_ops & + (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | + TARGET_PROT_DOUT_INSERT)) { + + if (iscsi_change_param_sprintf(conn, "ImmediateData=No")) + return -1; + + if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes")) + return -1; + + pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" + " T10-PI enabled ISER session\n"); + } + } + + return 0; +} + +static int iscsi_login_non_zero_tsih_s1( + struct iscsit_conn *conn, + unsigned char *buf) +{ + struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + + return iscsi_login_set_conn_values(NULL, conn, pdu->cid); +} + +/* + * Add a new connection to an existing session. 
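+ *
+ * (This is the MC/S case: the existing session is located by
+ * matching the ISID and the non-zero TSIH from the Login PDU.)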
+ */
+static int iscsi_login_non_zero_tsih_s2(
+ struct iscsit_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsit_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+ bool iser = false;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsit_session *)se_sess->fabric_sess_ptr;
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+ continue;
+ if (!memcmp(sess_p->isid, pdu->isid, 6) &&
+ (sess_p->tsih == be16_to_cpu(pdu->tsih))) {
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess) {
+ pr_err("Initiator attempting to add a connection to"
+ " a non-existent session, rejecting iSCSI Login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_SESSION);
+ return -1;
+ }
+
+ /*
+ * Stop the Time2Retain timer if this is a failed session; we restart
+ * the timer if the login is not successful.
+ */
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED)
+ atomic_set(&sess->session_continuation, 1);
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 ||
+ iscsi_copy_param_list(&conn->param_list,
+ conn->tpg->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+ iser = true;
+
+ iscsi_set_keys_to_negotiate(conn->param_list, iser);
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
+ return -1;
+
+ return 0;
+}
+
+int iscsi_login_post_auth_non_zero_tsih(
+ struct iscsit_conn *conn,
+ u16 cid,
+ u32 exp_statsn)
+{
+ struct iscsit_conn *conn_ptr = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsit_session *sess = conn->sess;
+
+ /*
+ * By following item 5 in the login table, if we have found
+ * an existing ISID and a valid/existing TSIH and an existing
+ * CID we do connection reinstatement. Currently we do not
+ * support it, so we send back a non-zero status class to the
+ * initiator and release the new connection.
+ */
+ conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
+ if (conn_ptr) {
+ pr_err("Connection exists with CID %hu for %s,"
+ " performing connection reinstatement.\n",
+ conn_ptr->cid, sess->sess_ops->InitiatorName);
+
+ iscsit_connection_reinstatement_rcfr(conn_ptr);
+ iscsit_dec_conn_usage_count(conn_ptr);
+ }
+
+ /*
+ * Check for any connection recovery entries containing CID.
+ * We use the original ExpStatSN sent in the first login request
+ * to acknowledge commands for the failed connection.
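+ * Commands already acknowledged by that ExpStatSN are dropped
+ * by iscsit_discard_cr_cmds_by_expstatsn() below.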
+ *
+ * Also note that an explicit logout may have already been sent,
+ * but the response may not be sent due to additional connection
+ * loss.
+ */
+ if (sess->sess_ops->ErrorRecoveryLevel == 2) {
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ sess, cid);
+ if (cr) {
+ pr_debug("Performing implicit logout"
+ " for connection recovery on CID: %hu\n",
+ conn->cid);
+ iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
+ }
+ }
+
+ /*
+ * Else we follow item 4 from the login table: we have found an
+ * existing ISID, a valid/existing TSIH and a new CID, so we go
+ * ahead and add a new connection to the session.
+ */
+ pr_debug("Adding CID %hu to existing session for %s.\n",
+ cid, sess->sess_ops->InitiatorName);
+
+ if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
+ pr_err("Adding additional connection to this session"
+ " would exceed MaxConnections %d, login failed.\n",
+ sess->sess_ops->MaxConnections);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_ISID_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void iscsi_post_login_start_timers(struct iscsit_conn *conn)
+{
+ struct iscsit_session *sess = conn->sess;
+ /*
+ * FIXME: Unsolicited NopIN support for ISER
+ */
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+ return;
+
+ if (!sess->sess_ops->SessionType)
+ iscsit_start_nopin_timer(conn);
+}
+
+int iscsit_start_kthreads(struct iscsit_conn *conn)
+{
+ int ret = 0;
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ ISCSIT_BITMAP_BITS, get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ if (conn->bitmap_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " iscsit_start_kthreads()\n");
+ return -ENOMEM;
+ }
+
+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+ "%s", ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(conn->tx_thread)) {
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ ret = PTR_ERR(conn->tx_thread);
+ goto out_bitmap;
+ }
+ conn->tx_thread_active = true;
+
+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+ "%s", ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(conn->rx_thread)) {
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ ret = PTR_ERR(conn->rx_thread);
+ goto out_tx;
+ }
+ conn->rx_thread_active = true;
+
+ return 0;
+out_tx:
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ conn->tx_thread_active = false;
+out_bitmap:
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ return ret;
+}
+
+void iscsi_post_login_handler(
+ struct iscsi_np *np,
+ struct iscsit_conn *conn,
+ u8 zero_tsih)
+{
+ int stop_timer = 0;
+ struct iscsit_session *sess = conn->sess;
+ struct se_session *se_sess = sess->se_sess;
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
+ ISCSI_LOGIN_STATUS_ACCEPT);
+
+ pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
+ conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
+
+ iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
+ /*
+ * SCSI Initiator -> SCSI Target Port Mapping
+ */
+ if (!zero_tsih) {
+ iscsi_set_session_parameters(sess->sess_ops,
+ conn->param_list, 0);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
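+ /*
+ * Existing session: attach the new connection to the session's
+ * connection list under sess->conn_lock.
+ */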
+ spin_lock_bh(&sess->conn_lock); + atomic_set(&sess->session_continuation, 0); + if (sess->session_state == TARG_SESS_STATE_FAILED) { + pr_debug("Moving to" + " TARG_SESS_STATE_LOGGED_IN.\n"); + sess->session_state = TARG_SESS_STATE_LOGGED_IN; + stop_timer = 1; + } + + pr_debug("iSCSI Login successful on CID: %hu from %pISpc to" + " %pISpc,%hu\n", conn->cid, &conn->login_sockaddr, + &conn->local_sockaddr, tpg->tpgt); + + list_add_tail(&conn->conn_list, &sess->sess_conn_list); + atomic_inc(&sess->nconn); + pr_debug("Incremented iSCSI Connection count to %d" + " from node: %s\n", atomic_read(&sess->nconn), + sess->sess_ops->InitiatorName); + spin_unlock_bh(&sess->conn_lock); + + iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads + * are scheduled on the same CPU. + */ + iscsit_thread_get_cpumask(conn); + conn->conn_rx_reset_cpumask = 1; + conn->conn_tx_reset_cpumask = 1; + /* + * Wakeup the sleeping iscsi_target_rx_thread() now that + * iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state. + */ + complete(&conn->rx_login_comp); + iscsit_dec_conn_usage_count(conn); + + if (stop_timer) { + spin_lock_bh(&se_tpg->session_lock); + iscsit_stop_time2retain_timer(sess); + spin_unlock_bh(&se_tpg->session_lock); + } + iscsit_dec_session_usage_count(sess); + return; + } + + iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); + iscsi_release_param_list(conn->param_list); + conn->param_list = NULL; + + iscsit_determine_maxcmdsn(sess); + + spin_lock_bh(&se_tpg->session_lock); + __transport_register_session(&sess->tpg->tpg_se_tpg, + se_sess->se_node_acl, se_sess, sess); + pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n"); + sess->session_state = TARG_SESS_STATE_LOGGED_IN; + + pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n", + conn->cid, &conn->login_sockaddr, &conn->local_sockaddr, + tpg->tpgt); + + spin_lock_bh(&sess->conn_lock); + list_add_tail(&conn->conn_list, &sess->sess_conn_list); + atomic_inc(&sess->nconn); + pr_debug("Incremented iSCSI Connection count to %d from node:" + " %s\n", atomic_read(&sess->nconn), + sess->sess_ops->InitiatorName); + spin_unlock_bh(&sess->conn_lock); + + sess->sid = tpg->sid++; + if (!sess->sid) + sess->sid = tpg->sid++; + pr_debug("Established iSCSI session from node: %s\n", + sess->sess_ops->InitiatorName); + + tpg->nsessions++; + if (tpg->tpg_tiqn) + tpg->tpg_tiqn->tiqn_nsessions++; + + pr_debug("Incremented number of active iSCSI sessions to %u on" + " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); + spin_unlock_bh(&se_tpg->session_lock); + + iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads + * are scheduled on the same CPU. + */ + iscsit_thread_get_cpumask(conn); + conn->conn_rx_reset_cpumask = 1; + conn->conn_tx_reset_cpumask = 1; + /* + * Wakeup the sleeping iscsi_target_rx_thread() now that + * iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state. 
+ */ + complete(&conn->rx_login_comp); + iscsit_dec_conn_usage_count(conn); +} + +int iscsit_setup_np( + struct iscsi_np *np, + struct sockaddr_storage *sockaddr) +{ + struct socket *sock = NULL; + int backlog = ISCSIT_TCP_BACKLOG, ret, len; + + switch (np->np_network_transport) { + case ISCSI_TCP: + np->np_ip_proto = IPPROTO_TCP; + np->np_sock_type = SOCK_STREAM; + break; + case ISCSI_SCTP_TCP: + np->np_ip_proto = IPPROTO_SCTP; + np->np_sock_type = SOCK_STREAM; + break; + case ISCSI_SCTP_UDP: + np->np_ip_proto = IPPROTO_SCTP; + np->np_sock_type = SOCK_SEQPACKET; + break; + default: + pr_err("Unsupported network_transport: %d\n", + np->np_network_transport); + return -EINVAL; + } + + ret = sock_create(sockaddr->ss_family, np->np_sock_type, + np->np_ip_proto, &sock); + if (ret < 0) { + pr_err("sock_create() failed.\n"); + return ret; + } + np->np_socket = sock; + /* + * Setup the np->np_sockaddr from the passed sockaddr setup + * in iscsi_target_configfs.c code.. + */ + memcpy(&np->np_sockaddr, sockaddr, + sizeof(struct sockaddr_storage)); + + if (sockaddr->ss_family == AF_INET6) + len = sizeof(struct sockaddr_in6); + else + len = sizeof(struct sockaddr_in); + /* + * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY. + */ + if (np->np_network_transport == ISCSI_TCP) + tcp_sock_set_nodelay(sock->sk); + sock_set_reuseaddr(sock->sk); + ip_sock_set_freebind(sock->sk); + + ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len); + if (ret < 0) { + pr_err("kernel_bind() failed: %d\n", ret); + goto fail; + } + + ret = kernel_listen(sock, backlog); + if (ret != 0) { + pr_err("kernel_listen() failed: %d\n", ret); + goto fail; + } + + return 0; +fail: + np->np_socket = NULL; + sock_release(sock); + return ret; +} + +int iscsi_target_setup_login_socket( + struct iscsi_np *np, + struct sockaddr_storage *sockaddr) +{ + struct iscsit_transport *t; + int rc; + + t = iscsit_get_transport(np->np_network_transport); + if (!t) + return -EINVAL; + + rc = t->iscsit_setup_np(np, sockaddr); + if (rc < 0) { + iscsit_put_transport(t); + return rc; + } + + np->np_transport = t; + np->enabled = true; + return 0; +} + +int iscsit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn) +{ + struct socket *new_sock, *sock = np->np_socket; + struct sockaddr_in sock_in; + struct sockaddr_in6 sock_in6; + int rc; + + rc = kernel_accept(sock, &new_sock, 0); + if (rc < 0) + return rc; + + conn->sock = new_sock; + conn->login_family = np->np_sockaddr.ss_family; + + if (np->np_sockaddr.ss_family == AF_INET6) { + memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in6, 1); + if (rc >= 0) { + if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) { + memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6)); + } else { + /* Pretend to be an ipv4 socket */ + sock_in.sin_family = AF_INET; + sock_in.sin_port = sock_in6.sin6_port; + memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4); + memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in)); + } + } + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in6, 0); + if (rc >= 0) { + if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) { + memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6)); + } else { + /* Pretend to be an ipv4 socket */ + sock_in.sin_family = AF_INET; + sock_in.sin_port = sock_in6.sin6_port; + memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4); + memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in)); + } + } + } else { + 
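/* AF_INET: record the IPv4 peer and local addresses directly. */
+ 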
memset(&sock_in, 0, sizeof(struct sockaddr_in)); + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in, 1); + if (rc >= 0) + memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in)); + + rc = conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in, 0); + if (rc >= 0) + memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in)); + } + + return 0; +} + +int iscsit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login) +{ + struct iscsi_login_req *login_req; + u32 padding = 0, payload_length; + + if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0) + return -1; + + login_req = (struct iscsi_login_req *)login->req; + payload_length = ntoh24(login_req->dlength); + padding = ((-payload_length) & 3); + + pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," + " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", + login_req->flags, login_req->itt, login_req->cmdsn, + login_req->exp_statsn, login_req->cid, payload_length); + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. + */ + if (login->first_request) { + login_req = (struct iscsi_login_req *)login->req; + login->leading_connection = (!login_req->tsih) ? 1 : 0; + login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags); + login->version_min = login_req->min_version; + login->version_max = login_req->max_version; + memcpy(login->isid, login_req->isid, 6); + login->cmd_sn = be32_to_cpu(login_req->cmdsn); + login->init_task_tag = login_req->itt; + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); + login->cid = be16_to_cpu(login_req->cid); + login->tsih = be16_to_cpu(login_req->tsih); + } + + if (iscsi_target_check_login_request(conn, login) < 0) + return -1; + + memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); + if (iscsi_login_rx_data(conn, login->req_buf, + payload_length + padding) < 0) + return -1; + + return 0; +} + +int iscsit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login, + u32 length) +{ + if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0) + return -1; + + return 0; +} + +static int +iscsit_conn_set_transport(struct iscsit_conn *conn, struct iscsit_transport *t) +{ + int rc; + + if (!t->owner) { + conn->conn_transport = t; + return 0; + } + + rc = try_module_get(t->owner); + if (!rc) { + pr_err("try_module_get() failed for %s\n", t->name); + return -EINVAL; + } + + conn->conn_transport = t; + return 0; +} + +static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np) +{ + struct iscsit_conn *conn; + + conn = kzalloc(sizeof(struct iscsit_conn), GFP_KERNEL); + if (!conn) { + pr_err("Could not allocate memory for new connection\n"); + return NULL; + } + pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); + conn->conn_state = TARG_CONN_STATE_FREE; + + init_waitqueue_head(&conn->queues_wq); + INIT_LIST_HEAD(&conn->conn_list); + INIT_LIST_HEAD(&conn->conn_cmd_list); + INIT_LIST_HEAD(&conn->immed_queue_list); + INIT_LIST_HEAD(&conn->response_queue_list); + init_completion(&conn->conn_post_wait_comp); + init_completion(&conn->conn_wait_comp); + init_completion(&conn->conn_wait_rcfr_comp); + init_completion(&conn->conn_waiting_on_uc_comp); + init_completion(&conn->conn_logout_comp); + init_completion(&conn->rx_half_close_comp); + init_completion(&conn->tx_half_close_comp); + init_completion(&conn->rx_login_comp); + spin_lock_init(&conn->cmd_lock); + spin_lock_init(&conn->conn_usage_lock); + spin_lock_init(&conn->immed_queue_lock); + spin_lock_init(&conn->nopin_timer_lock); + 
spin_lock_init(&conn->response_queue_lock); + spin_lock_init(&conn->state_lock); + spin_lock_init(&conn->login_worker_lock); + spin_lock_init(&conn->login_timer_lock); + + timer_setup(&conn->nopin_response_timer, + iscsit_handle_nopin_response_timeout, 0); + timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); + timer_setup(&conn->login_timer, iscsit_login_timeout, 0); + + if (iscsit_conn_set_transport(conn, np->np_transport) < 0) + goto free_conn; + + conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); + if (!conn->conn_ops) { + pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n"); + goto put_transport; + } + + if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { + pr_err("Unable to allocate conn->conn_cpumask\n"); + goto free_conn_ops; + } + + if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) { + pr_err("Unable to allocate conn->allowed_cpumask\n"); + goto free_conn_cpumask; + } + + conn->cmd_cnt = target_alloc_cmd_counter(); + if (!conn->cmd_cnt) + goto free_conn_allowed_cpumask; + + return conn; + +free_conn_allowed_cpumask: + free_cpumask_var(conn->allowed_cpumask); +free_conn_cpumask: + free_cpumask_var(conn->conn_cpumask); +free_conn_ops: + kfree(conn->conn_ops); +put_transport: + iscsit_put_transport(conn->conn_transport); +free_conn: + kfree(conn); + return NULL; +} + +void iscsit_free_conn(struct iscsit_conn *conn) +{ + target_free_cmd_counter(conn->cmd_cnt); + free_cpumask_var(conn->allowed_cpumask); + free_cpumask_var(conn->conn_cpumask); + kfree(conn->conn_ops); + iscsit_put_transport(conn->conn_transport); + kfree(conn); +} + +void iscsi_target_login_sess_out(struct iscsit_conn *conn, + bool zero_tsih, bool new_sess) +{ + if (!new_sess) + goto old_sess_out; + + pr_err("iSCSI Login negotiation failed.\n"); + iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + if (!zero_tsih || !conn->sess) + goto old_sess_out; + + transport_free_session(conn->sess->se_sess); + ida_free(&sess_ida, conn->sess->session_index); + kfree(conn->sess->sess_ops); + kfree(conn->sess); + conn->sess = NULL; + +old_sess_out: + /* + * If login negotiation fails check if the Time2Retain timer + * needs to be restarted. 
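+ *
+ * (Only a session in TARG_SESS_STATE_FAILED re-arms it; if
+ * Time2Retain then expires, the whole session is torn down.)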
+ */ + if (!zero_tsih && conn->sess) { + spin_lock_bh(&conn->sess->conn_lock); + if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { + struct se_portal_group *se_tpg = + &conn->tpg->tpg_se_tpg; + + atomic_set(&conn->sess->session_continuation, 0); + spin_unlock_bh(&conn->sess->conn_lock); + spin_lock_bh(&se_tpg->session_lock); + iscsit_start_time2retain_handler(conn->sess); + spin_unlock_bh(&se_tpg->session_lock); + } else + spin_unlock_bh(&conn->sess->conn_lock); + iscsit_dec_session_usage_count(conn->sess); + } + + ahash_request_free(conn->conn_tx_hash); + if (conn->conn_rx_hash) { + struct crypto_ahash *tfm; + + tfm = crypto_ahash_reqtfm(conn->conn_rx_hash); + ahash_request_free(conn->conn_rx_hash); + crypto_free_ahash(tfm); + } + + if (conn->param_list) { + iscsi_release_param_list(conn->param_list); + conn->param_list = NULL; + } + iscsi_target_nego_release(conn); + + if (conn->sock) { + sock_release(conn->sock); + conn->sock = NULL; + } + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + + if (conn->conn_transport->iscsit_free_conn) + conn->conn_transport->iscsit_free_conn(conn); + + iscsit_free_conn(conn); +} + +static int __iscsi_target_login_thread(struct iscsi_np *np) +{ + u8 *buffer, zero_tsih = 0; + int ret = 0, rc; + struct iscsit_conn *conn = NULL; + struct iscsi_login *login; + struct iscsi_portal_group *tpg = NULL; + struct iscsi_login_req *pdu; + struct iscsi_tpg_np *tpg_np; + bool new_sess = false; + + flush_signals(current); + + spin_lock_bh(&np->np_thread_lock); + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + spin_unlock_bh(&np->np_thread_lock); + complete(&np->np_restart_comp); + return 1; + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { + spin_unlock_bh(&np->np_thread_lock); + goto exit; + } else { + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + } + spin_unlock_bh(&np->np_thread_lock); + + conn = iscsit_alloc_conn(np); + if (!conn) { + /* Get another socket */ + return 1; + } + + rc = np->np_transport->iscsit_accept_np(np, conn); + if (rc == -ENOSYS) { + complete(&np->np_restart_comp); + iscsit_free_conn(conn); + goto exit; + } else if (rc < 0) { + spin_lock_bh(&np->np_thread_lock); + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + spin_unlock_bh(&np->np_thread_lock); + complete(&np->np_restart_comp); + iscsit_free_conn(conn); + /* Get another socket */ + return 1; + } + spin_unlock_bh(&np->np_thread_lock); + iscsit_free_conn(conn); + return 1; + } + /* + * Perform the remaining iSCSI connection initialization items. + */ + login = iscsi_login_init_conn(conn); + if (!login) { + goto new_sess_out; + } + + iscsit_start_login_timer(conn, current); + + pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); + conn->conn_state = TARG_CONN_STATE_XPT_UP; + /* + * This will process the first login request + payload. + */ + rc = np->np_transport->iscsit_get_login_rx(conn, login); + if (rc == 1) + return 1; + else if (rc < 0) + goto new_sess_out; + + buffer = &login->req[0]; + pdu = (struct iscsi_login_req *)buffer; + /* + * Used by iscsit_tx_login_rsp() for Login Response PDUs + * when Status-Class != 0.
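+ *
+ * Any error path below that replies via iscsit_tx_login_rsp() must
+ * echo this ITT back in the Login Response header, roughly:
+ *
+ *   rsp->opcode        = ISCSI_OP_LOGIN_RSP;
+ *   rsp->itt           = conn->login_itt;
+ *   rsp->status_class  = status_class;
+ *   rsp->status_detail = status_detail;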
+ */ + conn->login_itt = pdu->itt; + + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { + spin_unlock_bh(&np->np_thread_lock); + pr_err("iSCSI Network Portal on %pISpc currently not" + " active.\n", &np->np_sockaddr); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + goto new_sess_out; + } + spin_unlock_bh(&np->np_thread_lock); + + conn->network_transport = np->np_network_transport; + + pr_debug("Received iSCSI login request from %pISpc on %s Network" + " Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name, + &conn->local_sockaddr); + + pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); + conn->conn_state = TARG_CONN_STATE_IN_LOGIN; + + if (iscsi_login_check_initiator_version(conn, pdu->max_version, + pdu->min_version) < 0) + goto new_sess_out; + + zero_tsih = (pdu->tsih == 0x0000); + if (zero_tsih) { + /* + * This is the leading connection of a new session. + * We wait until after authentication to check for + * session reinstatement. + */ + if (iscsi_login_zero_tsih_s1(conn, buffer) < 0) + goto new_sess_out; + } else { + /* + * Add a new connection to an existing session. + * We check for a non-existent session in + * iscsi_login_non_zero_tsih_s2() below based + * on ISID/TSIH, but wait until after authentication + * to check for connection reinstatement, etc. + */ + if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0) + goto new_sess_out; + } + /* + * SessionType: Discovery + * + * Locates Default Portal + * + * SessionType: Normal + * + * Locates Target Portal from NP -> Target IQN + */ + rc = iscsi_target_locate_portal(np, conn, login); + if (rc < 0) { + tpg = conn->tpg; + goto new_sess_out; + } + login->zero_tsih = zero_tsih; + + if (conn->sess) + conn->sess->se_sess->sup_prot_ops = + conn->conn_transport->iscsit_get_sup_prot_ops(conn); + + tpg = conn->tpg; + if (!tpg) { + pr_err("Unable to locate struct iscsit_conn->tpg\n"); + goto new_sess_out; + } + + if (zero_tsih) { + if (iscsi_login_zero_tsih_s2(conn) < 0) + goto new_sess_out; + } else { + if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) + goto old_sess_out; + } + + if (conn->conn_transport->iscsit_validate_params) { + ret = conn->conn_transport->iscsit_validate_params(conn); + if (ret < 0) { + if (zero_tsih) + goto new_sess_out; + else + goto old_sess_out; + } + } + + ret = iscsi_target_start_negotiation(login, conn); + if (ret < 0) + goto new_sess_out; + + if (ret == 1) { + tpg_np = conn->tpg_np; + + iscsi_post_login_handler(np, conn, zero_tsih); + iscsit_deaccess_np(np, tpg, tpg_np); + } + + tpg = NULL; + tpg_np = NULL; + /* Get another socket */ + return 1; + +new_sess_out: + new_sess = true; +old_sess_out: + iscsit_stop_login_timer(conn); + tpg_np = conn->tpg_np; + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); + new_sess = false; + + if (tpg) { + iscsit_deaccess_np(np, tpg, tpg_np); + tpg = NULL; + tpg_np = NULL; + } + + return 1; + +exit: + spin_lock_bh(&np->np_thread_lock); + np->np_thread_state = ISCSI_NP_THREAD_EXIT; + spin_unlock_bh(&np->np_thread_lock); + + return 0; +} + +int iscsi_target_login_thread(void *arg) +{ + struct iscsi_np *np = arg; + int ret; + + allow_signal(SIGINT); + + while (1) { + ret = __iscsi_target_login_thread(np); + /* + * We break and exit here unless another sock_accept() call + * is expected.
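+ *
+ * The return value encodes that contract:
+ *
+ *   1 - this connection attempt is finished (logged in or failed);
+ *       loop and accept the next connection
+ *   0 - the portal moved to ISCSI_NP_THREAD_EXIT; fall through to
+ *       the msleep() loop below and wait for kthread_stop()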
+ */ + if (ret != 1) + break; + } + + while (!kthread_should_stop()) { + msleep(100); + } + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h new file mode 100644 index 0000000000..3ca2f232b3 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_LOGIN_H +#define ISCSI_TARGET_LOGIN_H + +#include <linux/types.h> + +struct iscsit_conn; +struct iscsi_login; +struct iscsi_np; +struct sockaddr_storage; + +extern int iscsi_login_setup_crypto(struct iscsit_conn *); +extern int iscsi_check_for_session_reinstatement(struct iscsit_conn *); +extern int iscsi_login_post_auth_non_zero_tsih(struct iscsit_conn *, u16, u32); +extern int iscsit_setup_np(struct iscsi_np *, + struct sockaddr_storage *); +extern int iscsi_target_setup_login_socket(struct iscsi_np *, + struct sockaddr_storage *); +extern int iscsit_accept_np(struct iscsi_np *, struct iscsit_conn *); +extern int iscsit_get_login_rx(struct iscsit_conn *, struct iscsi_login *); +extern int iscsit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32); +extern void iscsit_free_conn(struct iscsit_conn *); +extern int iscsit_start_kthreads(struct iscsit_conn *); +extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8); +extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool); +extern int iscsi_target_login_thread(void *); +extern void iscsi_handle_login_thread_timeout(struct timer_list *t); + +#endif /*** ISCSI_TARGET_LOGIN_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c new file mode 100644 index 0000000000..fa3fb5f4e6 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -0,0 +1,1408 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains main functions related to iSCSI Parameter negotiation. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/ctype.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/sched/signal.h> +#include <net/sock.h> +#include <trace/events/sock.h> +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_parameters.h" +#include "iscsi_target_login.h" +#include "iscsi_target_nego.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" +#include "iscsi_target_auth.h" + +#define MAX_LOGIN_PDUS 7 + +void convert_null_to_semi(char *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (buf[i] == '\0') + buf[i] = ';'; +} + +static int strlen_semi(char *buf) +{ + int i = 0; + + while (buf[i] != '\0') { + if (buf[i] == ';') + return i; + i++; + } + + return -1; +} + +int extract_param( + const char *in_buf, + const char *pattern, + unsigned int max_length, + char *out_buf, + unsigned char *type) +{ + char *ptr; + int len; + + if (!in_buf || !pattern || !out_buf || !type) + return -EINVAL; + + ptr = strstr(in_buf, pattern); + if (!ptr) + return -ENOENT; + + ptr = strstr(ptr, "="); + if (!ptr) + return -EINVAL; + + ptr += 1; + if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) { + ptr += 2; /* skip 0x */ + *type = HEX; + } else if (*ptr == '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) { + ptr += 2; /* skip 0b */ + *type = BASE64; + } else + *type = DECIMAL; + + len = strlen_semi(ptr); + if (len < 0) + return -EINVAL; + + if (len >= max_length) { + pr_err("Length of input: %d exceeds max_length:" + " %d\n", len, max_length); + return -EINVAL; + } + memcpy(out_buf, ptr, len); + out_buf[len] = '\0'; + + return 0; +} + +static struct iscsi_node_auth *iscsi_get_node_auth(struct iscsit_conn *conn) +{ + struct iscsi_portal_group *tpg; + struct iscsi_node_acl *nacl; + struct se_node_acl *se_nacl; + + if (conn->sess->sess_ops->SessionType) + return &iscsit_global->discovery_acl.node_auth; + + se_nacl = conn->sess->se_sess->se_node_acl; + if (!se_nacl) { + pr_err("Unable to locate struct se_node_acl for CHAP auth\n"); + return NULL; + } + + if (se_nacl->dynamic_node_acl) { + tpg = to_iscsi_tpg(se_nacl->se_tpg); + return &tpg->tpg_demo_auth; + } + + nacl = to_iscsi_nacl(se_nacl); + + return &nacl->node_auth; +} + +static u32 iscsi_handle_authentication( + struct iscsit_conn *conn, + char *in_buf, + char *out_buf, + int in_length, + int *out_length, + unsigned char *authtype) +{ + struct iscsi_node_auth *auth; + + auth = iscsi_get_node_auth(conn); + if (!auth) + return -1; + + if (strstr("CHAP", authtype)) + strcpy(conn->sess->auth_type, "CHAP"); + else + strcpy(conn->sess->auth_type, NONE); + + if (strstr("None", authtype)) + return 1; + else if (strstr("CHAP", authtype)) + return chap_main_loop(conn, auth, in_buf, out_buf, + &in_length, out_length); + /* SRP, SPKM1, SPKM2 and KRB5 are unsupported */ + return 2; +} + +static void iscsi_remove_failed_auth_entry(struct iscsit_conn *conn) +{ + kfree(conn->auth_protocol); +} + +int iscsi_target_check_login_request( + struct iscsit_conn *conn, + struct iscsi_login *login) +{ + int req_csg, req_nsg; + u32 payload_length; + struct iscsi_login_req *login_req; + + login_req = (struct iscsi_login_req *) login->req; + payload_length = ntoh24(login_req->dlength); + + switch (login_req->opcode & ISCSI_OPCODE_MASK) { + case 
ISCSI_OP_LOGIN: + break; + default: + pr_err("Received unknown opcode 0x%02x.\n", + login_req->opcode & ISCSI_OPCODE_MASK); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) && + (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { + pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE" + " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags); + req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags); + + if (req_csg != login->current_stage) { + pr_err("Initiator unexpectedly changed login stage" + " from %d to %d, login failed.\n", login->current_stage, + req_csg); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if ((req_nsg == 2) || (req_csg >= 2) || + ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) && + (req_nsg <= req_csg))) { + pr_err("Illegal login_req->flags Combination, CSG: %d," + " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg, + req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if ((login_req->max_version != login->version_max) || + (login_req->min_version != login->version_min)) { + pr_err("Login request changed Version Max/Min" + " unexpectedly to 0x%02x/0x%02x, protocol error\n", + login_req->max_version, login_req->min_version); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if (memcmp(login_req->isid, login->isid, 6) != 0) { + pr_err("Login request changed ISID unexpectedly," + " protocol error.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if (login_req->itt != login->init_task_tag) { + pr_err("Login request changed ITT unexpectedly to" + " 0x%08x, protocol error.\n", login_req->itt); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if (payload_length > MAX_KEY_VALUE_PAIRS) { + pr_err("Login request payload exceeds default" + " MaxRecvDataSegmentLength: %u, protocol error.\n", + MAX_KEY_VALUE_PAIRS); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(iscsi_target_check_login_request); + +static int iscsi_target_check_first_request( + struct iscsit_conn *conn, + struct iscsi_login *login) +{ + struct iscsi_param *param = NULL; + struct se_node_acl *se_nacl; + + login->first_request = 0; + + list_for_each_entry(param, &conn->param_list->param_list, p_list) { + if (!strncmp(param->name, SESSIONTYPE, 11)) { + if (!IS_PSTATE_ACCEPTOR(param)) { + pr_err("SessionType key not received" + " in first login request.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_MISSING_FIELDS); + return -1; + } + if (!strncmp(param->value, DISCOVERY, 9)) + return 0; + } + + if (!strncmp(param->name, INITIATORNAME, 13)) { + if (!IS_PSTATE_ACCEPTOR(param)) { + if (!login->leading_connection) + continue; + + pr_err("InitiatorName key not received" + " in first login request.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_MISSING_FIELDS); + return -1; + } + + /* + * For non-leading connections, double check that the + * received InitiatorName matches the existing session's
+ * struct iscsi_node_acl. + */ + if (!login->leading_connection) { + se_nacl = conn->sess->se_sess->se_node_acl; + if (!se_nacl) { + pr_err("Unable to locate" + " struct se_node_acl\n"); + iscsit_tx_login_rsp(conn, + ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_TGT_NOT_FOUND); + return -1; + } + + if (strcmp(param->value, + se_nacl->initiatorname)) { + pr_err("Incorrect" + " InitiatorName: %s for this" + " iSCSI Initiator Node.\n", + param->value); + iscsit_tx_login_rsp(conn, + ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_TGT_NOT_FOUND); + return -1; + } + } + } + } + + return 0; +} + +static int iscsi_target_do_tx_login_io(struct iscsit_conn *conn, struct iscsi_login *login) +{ + u32 padding = 0; + struct iscsi_login_rsp *login_rsp; + + login_rsp = (struct iscsi_login_rsp *) login->rsp; + + login_rsp->opcode = ISCSI_OP_LOGIN_RSP; + hton24(login_rsp->dlength, login->rsp_length); + memcpy(login_rsp->isid, login->isid, 6); + login_rsp->tsih = cpu_to_be16(login->tsih); + login_rsp->itt = login->init_task_tag; + login_rsp->statsn = cpu_to_be32(conn->stat_sn++); + login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); + login_rsp->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); + + pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x," + " ExpCmdSN: 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:" + " %u\n", login_rsp->flags, (__force u32)login_rsp->itt, + ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn), + ntohl(login_rsp->statsn), login->rsp_length); + + padding = ((-login->rsp_length) & 3); + /* + * Before sending the last login response containing the transition + * bit for full-feature-phase, go ahead and start up TX/RX threads + * now to avoid potential resource allocation failures after the + * final login response has been sent.
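+ *
+ * Ordering matters: once the initiator sees the final response with
+ * the transit bit to full feature phase it may immediately send
+ * command PDUs, so the RX/TX threads must already be running. A
+ * failure here can still be reported in a Login Response carrying
+ * Status-Class 0x03 (target error, "out of resources") rather than
+ * by silently dropping the TCP connection.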
+ */ + if (login->login_complete) { + int rc = iscsit_start_kthreads(conn); + if (rc) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + } + + if (conn->conn_transport->iscsit_put_login_tx(conn, login, + login->rsp_length + padding) < 0) + goto err; + + login->rsp_length = 0; + + return 0; + +err: + if (login->login_complete) { + if (conn->rx_thread && conn->rx_thread_active) { + send_sig(SIGINT, conn->rx_thread, 1); + complete(&conn->rx_login_comp); + kthread_stop(conn->rx_thread); + } + if (conn->tx_thread && conn->tx_thread_active) { + send_sig(SIGINT, conn->tx_thread, 1); + kthread_stop(conn->tx_thread); + } + spin_lock(&iscsit_global->ts_bitmap_lock); + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, + get_order(1)); + spin_unlock(&iscsit_global->ts_bitmap_lock); + } + return -1; +} + +static void iscsi_target_sk_data_ready(struct sock *sk) +{ + struct iscsit_conn *conn = sk->sk_user_data; + bool rc; + + trace_sk_data_ready(sk); + pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn); + + write_lock_bh(&sk->sk_callback_lock); + if (!sk->sk_user_data) { + write_unlock_bh(&sk->sk_callback_lock); + return; + } + if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) { + write_unlock_bh(&sk->sk_callback_lock); + pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn); + return; + } + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + write_unlock_bh(&sk->sk_callback_lock); + pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn); + return; + } + if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { + write_unlock_bh(&sk->sk_callback_lock); + pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); + if (iscsi_target_sk_data_ready == conn->orig_data_ready) + return; + conn->orig_data_ready(sk); + return; + } + + rc = schedule_delayed_work(&conn->login_work, 0); + if (!rc) { + pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work" + " got false\n"); + } + write_unlock_bh(&sk->sk_callback_lock); +} + +static void iscsi_target_sk_state_change(struct sock *); + +static void iscsi_target_set_sock_callbacks(struct iscsit_conn *conn) +{ + struct sock *sk; + + if (!conn->sock) + return; + + sk = conn->sock->sk; + pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn); + + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = conn; + conn->orig_data_ready = sk->sk_data_ready; + conn->orig_state_change = sk->sk_state_change; + sk->sk_data_ready = iscsi_target_sk_data_ready; + sk->sk_state_change = iscsi_target_sk_state_change; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ; + sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ; +} + +static void iscsi_target_restore_sock_callbacks(struct iscsit_conn *conn) +{ + struct sock *sk; + + if (!conn->sock) + return; + + sk = conn->sock->sk; + pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn); + + write_lock_bh(&sk->sk_callback_lock); + if (!sk->sk_user_data) { + write_unlock_bh(&sk->sk_callback_lock); + return; + } + sk->sk_user_data = NULL; + sk->sk_data_ready = conn->orig_data_ready; + sk->sk_state_change = conn->orig_state_change; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; + sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; +} + +static int iscsi_target_do_login(struct iscsit_conn *, struct iscsi_login *); + +static bool __iscsi_target_sk_check_close(struct sock *sk) +{ + if (sk->sk_state == TCP_CLOSE_WAIT || 
sk->sk_state == TCP_CLOSE) { + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," + " returning TRUE\n"); + return true; + } + return false; +} + +static bool iscsi_target_sk_check_close(struct iscsit_conn *conn) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_flag(struct iscsit_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = test_bit(flag, &conn->login_flags); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_and_clear(struct iscsit_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + if (!state) + clear_bit(flag, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login *login) +{ + bool zero_tsih = login->zero_tsih; + + iscsi_remove_failed_auth_entry(conn); + iscsi_target_nego_release(conn); + iscsi_target_login_sess_out(conn, zero_tsih, true); +} + +static void iscsi_target_do_login_rx(struct work_struct *work) +{ + struct iscsit_conn *conn = container_of(work, + struct iscsit_conn, login_work.work); + struct iscsi_login *login = conn->login; + struct iscsi_np *np = login->np; + struct iscsi_portal_group *tpg = conn->tpg; + struct iscsi_tpg_np *tpg_np = conn->tpg_np; + int rc, zero_tsih = login->zero_tsih; + bool state; + + pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", + conn, current->comm, current->pid); + + spin_lock(&conn->login_worker_lock); + set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags); + spin_unlock(&conn->login_worker_lock); + /* + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() + * before initial PDU processing in iscsi_target_start_negotiation() + * has completed, go ahead and retry until it's cleared. + * + * Otherwise if the TCP connection drops while this is occurring, + * iscsi_target_start_negotiation() will detect the failure, call + * cancel_delayed_work_sync(&conn->login_work), and clean up the + * remaining iscsi connection resources from iscsi_np process context.
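+ *
+ * The flag handshake, roughly:
+ *
+ *   start_negotiation: sets LOGIN_FLAGS_INITIAL_PDU, runs
+ *                      iscsi_target_do_login(), then clears it
+ *   sk_data_ready:     sets LOGIN_FLAGS_READ_ACTIVE and schedules
+ *                      conn->login_work
+ *   this worker:       LOGIN_FLAGS_INITIAL_PDU still set? re-arm
+ *                      with a 10ms delay and bail out below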
+ */ + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); + return; + } + + spin_lock(&tpg->tpg_state_lock); + state = (tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&tpg->tpg_state_lock); + + if (!state) { + pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); + goto err; + } + + if (iscsi_target_sk_check_close(conn)) { + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); + goto err; + } + + allow_signal(SIGINT); + rc = iscsit_set_login_timer_kworker(conn, current); + if (rc < 0) { + /* The login timer has already expired */ + pr_debug("iscsi_target_do_login_rx, login failed\n"); + goto err; + } + + rc = conn->conn_transport->iscsit_get_login_rx(conn, login); + flush_signals(current); + + if (rc < 0) + goto err; + + pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", + conn, current->comm, current->pid); + + /* + * LOGIN_FLAGS_READ_ACTIVE is cleared so that sk_data_ready + * could be triggered again after this. + * + * LOGIN_FLAGS_WRITE_ACTIVE is cleared after we successfully + * process a login PDU, so that sk_state_change can do login + * cleanup as needed if the socket is closed. If a delayed work is + * ongoing (LOGIN_FLAGS_WRITE_ACTIVE or LOGIN_FLAGS_READ_ACTIVE), + * sk_state_change will leave the cleanup to the delayed work or + * it will schedule a delayed work to do cleanup. + */ + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + if (!test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags)) { + clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); + set_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags); + } + write_unlock_bh(&sk->sk_callback_lock); + } + + rc = iscsi_target_do_login(conn, login); + if (rc < 0) { + goto err; + } else if (!rc) { + if (iscsi_target_sk_check_and_clear(conn, + LOGIN_FLAGS_WRITE_ACTIVE)) + goto err; + + /* + * Set the login timer thread pointer to NULL to prevent the + * login process from getting stuck if the initiator + * stops sending data.
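+ *
+ * Once no kworker is registered, a login timer that fires later has
+ * no task to signal with SIGINT; the timer path is then expected to
+ * queue conn->login_work itself so cleanup still happens.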
+ */ + rc = iscsit_set_login_timer_kworker(conn, NULL); + if (rc < 0) + goto err; + } else if (rc == 1) { + iscsit_stop_login_timer(conn); + cancel_delayed_work(&conn->login_work); + iscsi_target_nego_release(conn); + iscsi_post_login_handler(np, conn, zero_tsih); + iscsit_deaccess_np(np, tpg, tpg_np); + } + return; + +err: + iscsi_target_restore_sock_callbacks(conn); + iscsit_stop_login_timer(conn); + cancel_delayed_work(&conn->login_work); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); +} + +static void iscsi_target_sk_state_change(struct sock *sk) +{ + struct iscsit_conn *conn; + void (*orig_state_change)(struct sock *); + bool state; + + pr_debug("Entering iscsi_target_sk_state_change\n"); + + write_lock_bh(&sk->sk_callback_lock); + conn = sk->sk_user_data; + if (!conn) { + write_unlock_bh(&sk->sk_callback_lock); + return; + } + orig_state_change = conn->orig_state_change; + + if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n", + conn); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } + state = __iscsi_target_sk_check_close(sk); + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); + + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags) || + test_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_{READ|WRITE}_ACTIVE=1" + " sk_state_change conn: %p\n", conn); + if (state) + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", + conn); + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); + return; + } + /* + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, + * but only queue conn->login_work -> iscsi_target_do_login_rx() + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. + * + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() + * will detect the dropped TCP connection from delayed workqueue context. + * + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() + * via iscsi_target_sk_check_and_clear() is responsible for detecting the + * dropped TCP connection in iscsi_np process context, and cleaning up + * the remaining iscsi connection resources. + */ + if (state) { + pr_debug("iscsi_target_sk_state_change got failed state\n"); + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + + orig_state_change(sk); + + if (!state) + schedule_delayed_work(&conn->login_work, 0); + return; + } + write_unlock_bh(&sk->sk_callback_lock); + + orig_state_change(sk); +} + +/* + * NOTE: We check for existing sessions or connections AFTER the initiator + * has been successfully authenticated in order to protect against faked + * ISID/TSIH combinations. 
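+ *
+ * Checking earlier would let an unauthenticated initiator present a
+ * victim's ISID/TSIH pair and trigger reinstatement of a live
+ * session it does not own. TSIH == 0 selects the session
+ * reinstatement check, TSIH != 0 the non-zero-TSIH connection
+ * checks, as below.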
+ */ +static int iscsi_target_check_for_existing_instances( + struct iscsit_conn *conn, + struct iscsi_login *login) +{ + if (login->checked_for_existing) + return 0; + + login->checked_for_existing = 1; + + if (!login->tsih) + return iscsi_check_for_session_reinstatement(conn); + else + return iscsi_login_post_auth_non_zero_tsih(conn, login->cid, + login->initial_exp_statsn); +} + +static int iscsi_target_do_authentication( + struct iscsit_conn *conn, + struct iscsi_login *login) +{ + int authret; + u32 payload_length; + struct iscsi_param *param; + struct iscsi_login_req *login_req; + struct iscsi_login_rsp *login_rsp; + + login_req = (struct iscsi_login_req *) login->req; + login_rsp = (struct iscsi_login_rsp *) login->rsp; + payload_length = ntoh24(login_req->dlength); + + param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list); + if (!param) + return -1; + + authret = iscsi_handle_authentication( + conn, + login->req_buf, + login->rsp_buf, + payload_length, + &login->rsp_length, + param->value); + switch (authret) { + case 0: + pr_debug("Received OK response" + " from LIO Authentication, continuing.\n"); + break; + case 1: + pr_debug("iSCSI security negotiation" + " completed successfully.\n"); + login->auth_complete = 1; + if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) && + (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { + login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 | + ISCSI_FLAG_LOGIN_TRANSIT); + login->current_stage = 1; + } + return iscsi_target_check_for_existing_instances( + conn, login); + case 2: + pr_err("Security negotiation" + " failed.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; + default: + pr_err("Received unknown error %d from LIO" + " Authentication\n", authret); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_TARGET_ERROR); + return -1; + } + + return 0; +} + +bool iscsi_conn_auth_required(struct iscsit_conn *conn) +{ + struct iscsi_node_acl *nacl; + struct se_node_acl *se_nacl; + + if (conn->sess->sess_ops->SessionType) { + /* + * For SessionType=Discovery + */ + return conn->tpg->tpg_attrib.authentication; + } + /* + * For SessionType=Normal + */ + se_nacl = conn->sess->se_sess->se_node_acl; + if (!se_nacl) { + pr_debug("Unknown ACL is trying to connect\n"); + return true; + } + + if (se_nacl->dynamic_node_acl) { + pr_debug("Dynamic ACL %s is trying to connect\n", + se_nacl->initiatorname); + return conn->tpg->tpg_attrib.authentication; + } + + pr_debug("Known ACL %s is trying to connect\n", + se_nacl->initiatorname); + + nacl = to_iscsi_nacl(se_nacl); + if (nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED) + return conn->tpg->tpg_attrib.authentication; + + return nacl->node_attrib.authentication; +} + +static int iscsi_target_handle_csg_zero( + struct iscsit_conn *conn, + struct iscsi_login *login) +{ + int ret; + u32 payload_length; + struct iscsi_param *param; + struct iscsi_login_req *login_req; + struct iscsi_login_rsp *login_rsp; + + login_req = (struct iscsi_login_req *) login->req; + login_rsp = (struct iscsi_login_rsp *) login->rsp; + payload_length = ntoh24(login_req->dlength); + + param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list); + if (!param) + return -1; + + ret = iscsi_decode_text_input( + PHASE_SECURITY|PHASE_DECLARATIVE, + SENDER_INITIATOR|SENDER_RECEIVER, + login->req_buf, + payload_length, + conn); + if (ret < 0) + return -1; + + if (ret > 0) { + if (login->auth_complete) { + pr_err("Initiator has 
already been" + " successfully authenticated, but is still" + " sending %s keys.\n", param->value); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + goto do_auth; + } else if (!payload_length) { + pr_err("Initiator sent zero length security payload," + " login failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; + } + + if (login->first_request) + if (iscsi_target_check_first_request(conn, login) < 0) + return -1; + + ret = iscsi_encode_text_output( + PHASE_SECURITY|PHASE_DECLARATIVE, + SENDER_TARGET, + login->rsp_buf, + &login->rsp_length, + conn->param_list, + conn->tpg->tpg_attrib.login_keys_workaround); + if (ret < 0) + return -1; + + if (!iscsi_check_negotiated_keys(conn->param_list)) { + bool auth_required = iscsi_conn_auth_required(conn); + + if (auth_required) { + if (!strncmp(param->value, NONE, 4)) { + pr_err("Initiator sent AuthMethod=None but" + " Target is enforcing iSCSI Authentication," + " login failed.\n"); + iscsit_tx_login_rsp(conn, + ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; + } + + if (!login->auth_complete) + return 0; + + if (strncmp(param->value, NONE, 4) && + !login->auth_complete) + return 0; + } + + if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) && + (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { + login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 | + ISCSI_FLAG_LOGIN_TRANSIT; + login->current_stage = 1; + } + } + + return 0; +do_auth: + return iscsi_target_do_authentication(conn, login); +} + +static bool iscsi_conn_authenticated(struct iscsit_conn *conn, + struct iscsi_login *login) +{ + if (!iscsi_conn_auth_required(conn)) + return true; + + if (login->auth_complete) + return true; + + return false; +} + +static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login) +{ + int ret; + u32 payload_length; + struct iscsi_login_req *login_req; + struct iscsi_login_rsp *login_rsp; + + login_req = (struct iscsi_login_req *) login->req; + login_rsp = (struct iscsi_login_rsp *) login->rsp; + payload_length = ntoh24(login_req->dlength); + + ret = iscsi_decode_text_input( + PHASE_OPERATIONAL|PHASE_DECLARATIVE, + SENDER_INITIATOR|SENDER_RECEIVER, + login->req_buf, + payload_length, + conn); + if (ret < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if (login->first_request) + if (iscsi_target_check_first_request(conn, login) < 0) + return -1; + + if (iscsi_target_check_for_existing_instances(conn, login) < 0) + return -1; + + ret = iscsi_encode_text_output( + PHASE_OPERATIONAL|PHASE_DECLARATIVE, + SENDER_TARGET, + login->rsp_buf, + &login->rsp_length, + conn->param_list, + conn->tpg->tpg_attrib.login_keys_workaround); + if (ret < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_INIT_ERR); + return -1; + } + + if (!iscsi_conn_authenticated(conn, login)) { + pr_err("Initiator is requesting CSG: 1, has not been" + " successfully authenticated, and the Target is" + " enforcing iSCSI Authentication, login failed.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_AUTH_FAILED); + return -1; + } + + if (!iscsi_check_negotiated_keys(conn->param_list)) + if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) && + (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) + login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 | + ISCSI_FLAG_LOGIN_TRANSIT; 
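+ /*
+ * The NSG=3 plus transit bits set above arm the move to full
+ * feature phase; iscsi_target_do_login() turns this into the
+ * final Login Response once nothing remains to be negotiated.
+ */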
+ + return 0; +} + +/* + * RETURN VALUE: + * + * 1 = Login successful + * -1 = Login failed + * 0 = More PDU exchanges required + */ +static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login) +{ + int pdu_count = 0; + struct iscsi_login_req *login_req; + struct iscsi_login_rsp *login_rsp; + + login_req = (struct iscsi_login_req *) login->req; + login_rsp = (struct iscsi_login_rsp *) login->rsp; + + while (1) { + if (++pdu_count > MAX_LOGIN_PDUS) { + pr_err("MAX_LOGIN_PDUS count reached.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_TARGET_ERROR); + return -1; + } + + switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) { + case 0: + login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK; + if (iscsi_target_handle_csg_zero(conn, login) < 0) + return -1; + break; + case 1: + login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1; + if (iscsi_target_handle_csg_one(conn, login) < 0) + return -1; + if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { + /* + * Check to make sure the TCP connection has not + * dropped asynchronously while session reinstatement + * was occurring in this kthread context, before + * transitioning to full feature phase operation. + */ + if (iscsi_target_sk_check_close(conn)) + return -1; + + login->tsih = conn->sess->tsih; + login->login_complete = 1; + iscsi_target_restore_sock_callbacks(conn); + if (iscsi_target_do_tx_login_io(conn, + login) < 0) + return -1; + return 1; + } + break; + default: + pr_err("Illegal CSG: %d received from" + " Initiator, protocol error.\n", + ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)); + break; + } + + if (iscsi_target_do_tx_login_io(conn, login) < 0) + return -1; + + if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { + login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT; + login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; + } + break; + } + + return 0; +} + +static void iscsi_initiatorname_tolower( + char *param_buf) +{ + char *c; + u32 iqn_size = strlen(param_buf), i; + + for (i = 0; i < iqn_size; i++) { + c = &param_buf[i]; + if (!isupper(*c)) + continue; + + *c = tolower(*c); + } +} + +/* + * Processes the first Login Request. + */ +int iscsi_target_locate_portal( + struct iscsi_np *np, + struct iscsit_conn *conn, + struct iscsi_login *login) +{ + char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL; + char *tmpbuf, *start = NULL, *end = NULL, *key, *value; + struct iscsit_session *sess = conn->sess; + struct iscsi_tiqn *tiqn; + struct iscsi_tpg_np *tpg_np = NULL; + struct iscsi_login_req *login_req; + struct se_node_acl *se_nacl; + u32 payload_length, queue_depth = 0; + int sessiontype = 0, ret = 0, tag_num, tag_size; + + INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx); + iscsi_target_set_sock_callbacks(conn); + + login->np = np; + conn->tpg = NULL; + + login_req = (struct iscsi_login_req *) login->req; + payload_length = ntoh24(login_req->dlength); + + tmpbuf = kmemdup_nul(login->req_buf, payload_length, GFP_KERNEL); + if (!tmpbuf) { + pr_err("Unable to allocate memory for tmpbuf.\n"); + return -1; + } + + start = tmpbuf; + end = (start + payload_length); + + /* + * Locate the initial keys expected from the Initiator node in + * the first login request in order to progress with the login phase.
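+ *
+ * The payload is a series of NUL-terminated key=value pairs, with
+ * illustrative values:
+ *
+ *   InitiatorName=iqn.1994-05.com.example:client\0
+ *   SessionType=Normal\0
+ *   TargetName=iqn.2003-01.org.example:disk1\0
+ *
+ * hence the cursor below advances by strlen(key) + strlen(value) + 2
+ * to skip the '=' separator and the trailing NUL as well.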
+ */ + while (start < end) { + if (iscsi_extract_key_value(start, &key, &value) < 0) { + ret = -1; + goto out; + } + + if (!strncmp(key, "InitiatorName", 13)) + i_buf = value; + else if (!strncmp(key, "SessionType", 11)) + s_buf = value; + else if (!strncmp(key, "TargetName", 10)) + t_buf = value; + + start += strlen(key) + strlen(value) + 2; + } + /* + * See 5.3. Login Phase. + */ + if (!i_buf) { + pr_err("InitiatorName key not received" + " in first login request.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_MISSING_FIELDS); + ret = -1; + goto out; + } + /* + * Convert the incoming InitiatorName to lowercase following + * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs + * are NOT case sensitive. + */ + iscsi_initiatorname_tolower(i_buf); + + if (!s_buf) { + if (!login->leading_connection) + goto get_target; + + pr_err("SessionType key not received" + " in first login request.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_MISSING_FIELDS); + ret = -1; + goto out; + } + + /* + * Use default portal group for discovery sessions. + */ + sessiontype = strncmp(s_buf, DISCOVERY, 9); + if (!sessiontype) { + if (!login->leading_connection) + goto get_target; + + sess->sess_ops->SessionType = 1; + /* + * Setup crc32c modules from libcrypto + */ + if (iscsi_login_setup_crypto(conn) < 0) { + pr_err("iscsi_login_setup_crypto() failed\n"); + ret = -1; + goto out; + } + /* + * Serialize access across the discovery struct iscsi_portal_group to + * process login attempt. + */ + conn->tpg = iscsit_global->discovery_tpg; + if (iscsit_access_np(np, conn->tpg) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + conn->tpg = NULL; + ret = -1; + goto out; + } + ret = 0; + goto alloc_tags; + } + +get_target: + if (!t_buf) { + pr_err("TargetName key not received" + " in first login request while" + " SessionType=Normal.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_MISSING_FIELDS); + ret = -1; + goto out; + } + + /* + * Locate Target IQN from Storage Node. + */ + tiqn = iscsit_get_tiqn_for_login(t_buf); + if (!tiqn) { + pr_err("Unable to locate Target IQN: %s in" + " Storage Node\n", t_buf); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + ret = -1; + goto out; + } + pr_debug("Located Storage Object: %s\n", tiqn->tiqn); + + /* + * Locate Target Portal Group from Storage Node. + */ + conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np); + if (!conn->tpg) { + pr_err("Unable to locate Target Portal Group" + " on %s\n", tiqn->tiqn); + iscsit_put_tiqn_for_login(tiqn); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + ret = -1; + goto out; + } + conn->tpg_np = tpg_np; + pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt); + /* + * Setup crc32c modules from libcrypto + */ + if (iscsi_login_setup_crypto(conn) < 0) { + pr_err("iscsi_login_setup_crypto() failed\n"); + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); + iscsit_put_tiqn_for_login(tiqn); + conn->tpg = NULL; + ret = -1; + goto out; + } + /* + * Serialize access across the struct iscsi_portal_group to + * process login attempt. 
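+ *
+ * iscsit_access_np() is expected to take the per-TPG login
+ * semaphore, so at most one login negotiates against a given portal
+ * group at a time; iscsit_deaccess_np() releases it on every exit
+ * path.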
+ */ + if (iscsit_access_np(np, conn->tpg) < 0) { + kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); + iscsit_put_tiqn_for_login(tiqn); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + conn->tpg = NULL; + ret = -1; + goto out; + } + + /* + * conn->sess->node_acl will be set when the referenced + * struct iscsit_session is located from received ISID+TSIH in + * iscsi_login_non_zero_tsih_s2(). + */ + if (!login->leading_connection) { + ret = 0; + goto out; + } + + /* + * This value is required in iscsi_login_zero_tsih_s2() + */ + sess->sess_ops->SessionType = 0; + + /* + * Locate incoming Initiator IQN reference from Storage Node. + */ + sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( + &conn->tpg->tpg_se_tpg, i_buf); + if (!sess->se_sess->se_node_acl) { + pr_err("iSCSI Initiator Node: %s is not authorized to" + " access iSCSI target portal group: %hu.\n", + i_buf, conn->tpg->tpgt); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, + ISCSI_LOGIN_STATUS_TGT_FORBIDDEN); + ret = -1; + goto out; + } + se_nacl = sess->se_sess->se_node_acl; + queue_depth = se_nacl->queue_depth; + /* + * Setup pre-allocated tags based upon allowed per NodeACL CmdSN + * depth for non immediate commands, plus extra tags for immediate + * commands. + * + * Also enforce a ISCSIT_MIN_TAGS to prevent unnecessary contention + * in per-cpu-ida tag allocation logic + small queue_depth. + */ +alloc_tags: + tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; + tag_size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size; + + ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); + if (ret < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + ret = -1; + } +out: + kfree(tmpbuf); + return ret; +} + +int iscsi_target_start_negotiation( + struct iscsi_login *login, + struct iscsit_conn *conn) +{ + int ret; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + /* + * If iscsi_target_do_login returns zero to signal more PDU + * exchanges are required to complete the login, go ahead and + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection + * is still active. + * + * Otherwise if TCP connection dropped asynchronously, go ahead + * and perform connection cleanup now. + */ + ret = iscsi_target_do_login(conn, login); + if (!ret) { + spin_lock(&conn->login_worker_lock); + + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) + ret = -1; + else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) { + if (iscsit_set_login_timer_kworker(conn, NULL) < 0) { + /* + * The timeout has expired already. + * Schedule login_work to perform the cleanup. 
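+ *
+ * login_work re-enters iscsi_target_do_login_rx(), where its own
+ * iscsit_set_login_timer_kworker() call fails the same way and
+ * funnels the connection into the common err: teardown path.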
+ */ + schedule_delayed_work(&conn->login_work, 0); + } + } + + spin_unlock(&conn->login_worker_lock); + } + + if (ret < 0) { + iscsi_target_restore_sock_callbacks(conn); + iscsi_remove_failed_auth_entry(conn); + } + if (ret != 0) { + iscsit_stop_login_timer(conn); + cancel_delayed_work_sync(&conn->login_work); + iscsi_target_nego_release(conn); + } + + return ret; +} + +void iscsi_target_nego_release(struct iscsit_conn *conn) +{ + struct iscsi_login *login = conn->conn_login; + + if (!login) + return; + + kfree(login->req_buf); + kfree(login->rsp_buf); + kfree(login); + + conn->conn_login = NULL; +} diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h new file mode 100644 index 0000000000..41c3db3dde --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_nego.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_NEGO_H +#define ISCSI_TARGET_NEGO_H + +#define DECIMAL 0 +#define HEX 1 +#define BASE64 2 + +struct iscsit_conn; +struct iscsi_login; +struct iscsi_np; + +extern void convert_null_to_semi(char *, int); +extern int extract_param(const char *, const char *, unsigned int, char *, + unsigned char *); +extern int iscsi_target_check_login_request(struct iscsit_conn *, + struct iscsi_login *); +extern int iscsi_target_get_initial_payload(struct iscsit_conn *, + struct iscsi_login *); +extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *, + struct iscsi_login *); +extern int iscsi_target_start_negotiation( + struct iscsi_login *, struct iscsit_conn *); +extern void iscsi_target_nego_release(struct iscsit_conn *); +extern bool iscsi_conn_auth_required(struct iscsit_conn *conn); +#endif /* ISCSI_TARGET_NEGO_H */ diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c new file mode 100644 index 0000000000..d63efdefb1 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains the main functions related to Initiator Node Attributes. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <target/target_core_base.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_device.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target_nodeattrib.h" + +static inline char *iscsit_na_get_initiatorname( + struct iscsi_node_acl *nacl) +{ + struct se_node_acl *se_nacl = &nacl->se_node_acl; + + return &se_nacl->initiatorname[0]; +} + +void iscsit_set_default_node_attribues( + struct iscsi_node_acl *acl, + struct iscsi_portal_group *tpg) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + a->authentication = NA_AUTHENTICATION_INHERITED; + a->dataout_timeout = NA_DATAOUT_TIMEOUT; + a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES; + a->nopin_timeout = NA_NOPIN_TIMEOUT; + a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT; + a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; + a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; + a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; + a->default_erl = tpg->tpg_attrib.default_erl; +} + +int iscsit_na_dataout_timeout( + struct iscsi_node_acl *acl, + u32 dataout_timeout) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) { + pr_err("Requested DataOut Timeout %u larger than" + " maximum %u\n", dataout_timeout, + NA_DATAOUT_TIMEOUT_MAX); + return -EINVAL; + } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) { + pr_err("Requested DataOut Timeout %u smaller than" + " minimum %u\n", dataout_timeout, + NA_DATAOUT_TIMEOUT_MIX); + return -EINVAL; + } + + a->dataout_timeout = dataout_timeout; + pr_debug("Set DataOut Timeout to %u for Initiator Node" + " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl)); + + return 0; +} + +int iscsit_na_dataout_timeout_retries( + struct iscsi_node_acl *acl, + u32 dataout_timeout_retries) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) { + pr_err("Requested DataOut Timeout Retries %u larger" + " than maximum %u", dataout_timeout_retries, + NA_DATAOUT_TIMEOUT_RETRIES_MAX); + return -EINVAL; + } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) { + pr_err("Requested DataOut Timeout Retries %u smaller" + " than minimum %u", dataout_timeout_retries, + NA_DATAOUT_TIMEOUT_RETRIES_MIN); + return -EINVAL; + } + + a->dataout_timeout_retries = dataout_timeout_retries; + pr_debug("Set DataOut Timeout Retries to %u for" + " Initiator Node %s\n", a->dataout_timeout_retries, + iscsit_na_get_initiatorname(acl)); + + return 0; +} + +int iscsit_na_nopin_timeout( + struct iscsi_node_acl *acl, + u32 nopin_timeout) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + struct iscsit_session *sess; + struct iscsit_conn *conn; + struct se_node_acl *se_nacl = &a->nacl->se_node_acl; + struct se_session *se_sess; + u32 orig_nopin_timeout = a->nopin_timeout; + + if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) { + pr_err("Requested NopIn Timeout %u larger than maximum" + " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX); + return -EINVAL; + } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) && + (nopin_timeout != 0)) { + pr_err("Requested NopIn Timeout %u smaller than" + " minimum %u and not 0\n", nopin_timeout, + NA_NOPIN_TIMEOUT_MIN); + return -EINVAL; + } + + a->nopin_timeout = nopin_timeout; + pr_debug("Set NopIn Timeout to %u for Initiator" + " Node %s\n", a->nopin_timeout, + 
iscsit_na_get_initiatorname(acl)); + /* + * Reenable disabled nopin_timeout timer for all iSCSI connections. + */ + if (!orig_nopin_timeout) { + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + + spin_lock(&sess->conn_lock); + list_for_each_entry(conn, &sess->sess_conn_list, + conn_list) { + if (conn->conn_state != + TARG_CONN_STATE_LOGGED_IN) + continue; + + spin_lock(&conn->nopin_timer_lock); + __iscsit_start_nopin_timer(conn); + spin_unlock(&conn->nopin_timer_lock); + } + spin_unlock(&sess->conn_lock); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + } + + return 0; +} + +int iscsit_na_nopin_response_timeout( + struct iscsi_node_acl *acl, + u32 nopin_response_timeout) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) { + pr_err("Requested NopIn Response Timeout %u larger" + " than maximum %u\n", nopin_response_timeout, + NA_NOPIN_RESPONSE_TIMEOUT_MAX); + return -EINVAL; + } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) { + pr_err("Requested NopIn Response Timeout %u smaller" + " than minimum %u\n", nopin_response_timeout, + NA_NOPIN_RESPONSE_TIMEOUT_MIN); + return -EINVAL; + } + + a->nopin_response_timeout = nopin_response_timeout; + pr_debug("Set NopIn Response Timeout to %u for" + " Initiator Node %s\n", a->nopin_response_timeout, + iscsit_na_get_initiatorname(acl)); + + return 0; +} + +int iscsit_na_random_datain_pdu_offsets( + struct iscsi_node_acl *acl, + u32 random_datain_pdu_offsets) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) { + pr_err("Requested Random DataIN PDU Offsets: %u not" + " 0 or 1\n", random_datain_pdu_offsets); + return -EINVAL; + } + + a->random_datain_pdu_offsets = random_datain_pdu_offsets; + pr_debug("Set Random DataIN PDU Offsets to %u for" + " Initiator Node %s\n", a->random_datain_pdu_offsets, + iscsit_na_get_initiatorname(acl)); + + return 0; +} + +int iscsit_na_random_datain_seq_offsets( + struct iscsi_node_acl *acl, + u32 random_datain_seq_offsets) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) { + pr_err("Requested Random DataIN Sequence Offsets: %u" + " not 0 or 1\n", random_datain_seq_offsets); + return -EINVAL; + } + + a->random_datain_seq_offsets = random_datain_seq_offsets; + pr_debug("Set Random DataIN Sequence Offsets to %u for" + " Initiator Node %s\n", a->random_datain_seq_offsets, + iscsit_na_get_initiatorname(acl)); + + return 0; +} + +int iscsit_na_random_r2t_offsets( + struct iscsi_node_acl *acl, + u32 random_r2t_offsets) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (random_r2t_offsets != 0 && random_r2t_offsets != 1) { + pr_err("Requested Random R2T Offsets: %u not" + " 0 or 1\n", random_r2t_offsets); + return -EINVAL; + } + + a->random_r2t_offsets = random_r2t_offsets; + pr_debug("Set Random R2T Offsets to %u for" + " Initiator Node %s\n", a->random_r2t_offsets, + iscsit_na_get_initiatorname(acl)); + + return 0; +} + +int iscsit_na_default_erl( + struct iscsi_node_acl *acl, + u32 default_erl) +{ + struct iscsi_node_attrib *a = &acl->node_attrib; + + if (default_erl != 0 && default_erl != 1 && default_erl != 2) { + pr_err("Requested default ERL: %u not 0, 1, or 2\n", + default_erl); + return -EINVAL; + } + + a->default_erl = default_erl; + pr_debug("Set use ERL0 flag to %u for Initiator" + " Node %s\n",
a->default_erl, + iscsit_na_get_initiatorname(acl)); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h new file mode 100644 index 0000000000..ce074cb545 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_NODEATTRIB_H +#define ISCSI_TARGET_NODEATTRIB_H + +#include <linux/types.h> + +struct iscsi_node_acl; +struct iscsi_portal_group; + +extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *, + struct iscsi_portal_group *); +extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); +extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32); +extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32); +extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32); +extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32); +extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32); +extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32); +extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32); + +#endif /* ISCSI_TARGET_NODEATTRIB_H */ diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c new file mode 100644 index 0000000000..5b90c22ee3 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -0,0 +1,1717 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains main functions related to iSCSI Parameter negotiation. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <linux/uio.h> /* struct kvec */ +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_util.h" +#include "iscsi_target_parameters.h" + +int iscsi_login_rx_data( + struct iscsit_conn *conn, + char *buf, + int length) +{ + int rx_got; + struct kvec iov; + + memset(&iov, 0, sizeof(struct kvec)); + iov.iov_len = length; + iov.iov_base = buf; + + rx_got = rx_data(conn, &iov, 1, length); + if (rx_got != length) { + pr_err("rx_data returned %d, expecting %d.\n", + rx_got, length); + return -1; + } + + return 0 ; +} + +int iscsi_login_tx_data( + struct iscsit_conn *conn, + char *pdu_buf, + char *text_buf, + int text_length) +{ + int length, tx_sent, iov_cnt = 1; + struct kvec iov[2]; + + length = (ISCSI_HDR_LEN + text_length); + + memset(&iov[0], 0, 2 * sizeof(struct kvec)); + iov[0].iov_len = ISCSI_HDR_LEN; + iov[0].iov_base = pdu_buf; + + if (text_buf && text_length) { + iov[1].iov_len = text_length; + iov[1].iov_base = text_buf; + iov_cnt++; + } + + tx_sent = tx_data(conn, &iov[0], iov_cnt, length); + if (tx_sent != length) { + pr_err("tx_data returned %d, expecting %d.\n", + tx_sent, length); + return -1; + } + + return 0; +} + +void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops) +{ + pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ? + "CRC32C" : "None"); + pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ? 
+ "CRC32C" : "None"); + pr_debug("MaxRecvDataSegmentLength: %u\n", + conn_ops->MaxRecvDataSegmentLength); +} + +void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops) +{ + pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName); + pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias); + pr_debug("TargetName: %s\n", sess_ops->TargetName); + pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias); + pr_debug("TargetPortalGroupTag: %hu\n", + sess_ops->TargetPortalGroupTag); + pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections); + pr_debug("InitialR2T: %s\n", + (sess_ops->InitialR2T) ? "Yes" : "No"); + pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ? + "Yes" : "No"); + pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength); + pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength); + pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait); + pr_debug("DefaultTime2Retain: %hu\n", + sess_ops->DefaultTime2Retain); + pr_debug("MaxOutstandingR2T: %hu\n", + sess_ops->MaxOutstandingR2T); + pr_debug("DataPDUInOrder: %s\n", + (sess_ops->DataPDUInOrder) ? "Yes" : "No"); + pr_debug("DataSequenceInOrder: %s\n", + (sess_ops->DataSequenceInOrder) ? "Yes" : "No"); + pr_debug("ErrorRecoveryLevel: %hu\n", + sess_ops->ErrorRecoveryLevel); + pr_debug("SessionType: %s\n", (sess_ops->SessionType) ? + "Discovery" : "Normal"); +} + +void iscsi_print_params(struct iscsi_param_list *param_list) +{ + struct iscsi_param *param; + + list_for_each_entry(param, ¶m_list->param_list, p_list) + pr_debug("%s: %s\n", param->name, param->value); +} + +static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list, + char *name, char *value, u8 phase, u8 scope, u8 sender, + u16 type_range, u8 use) +{ + struct iscsi_param *param = NULL; + + param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); + if (!param) { + pr_err("Unable to allocate memory for parameter.\n"); + goto out; + } + INIT_LIST_HEAD(¶m->p_list); + + param->name = kstrdup(name, GFP_KERNEL); + if (!param->name) { + pr_err("Unable to allocate memory for parameter name.\n"); + goto out; + } + + param->value = kstrdup(value, GFP_KERNEL); + if (!param->value) { + pr_err("Unable to allocate memory for parameter value.\n"); + goto out; + } + + param->phase = phase; + param->scope = scope; + param->sender = sender; + param->use = use; + param->type_range = type_range; + + switch (param->type_range) { + case TYPERANGE_BOOL_AND: + param->type = TYPE_BOOL_AND; + break; + case TYPERANGE_BOOL_OR: + param->type = TYPE_BOOL_OR; + break; + case TYPERANGE_0_TO_2: + case TYPERANGE_0_TO_3600: + case TYPERANGE_0_TO_32767: + case TYPERANGE_0_TO_65535: + case TYPERANGE_1_TO_65535: + case TYPERANGE_2_TO_3600: + case TYPERANGE_512_TO_16777215: + param->type = TYPE_NUMBER; + break; + case TYPERANGE_AUTH: + case TYPERANGE_DIGEST: + param->type = TYPE_VALUE_LIST | TYPE_STRING; + break; + case TYPERANGE_ISCSINAME: + case TYPERANGE_SESSIONTYPE: + case TYPERANGE_TARGETADDRESS: + case TYPERANGE_UTF8: + param->type = TYPE_STRING; + break; + default: + pr_err("Unknown type_range 0x%02x\n", + param->type_range); + goto out; + } + list_add_tail(¶m->p_list, ¶m_list->param_list); + + return param; +out: + if (param) { + kfree(param->value); + kfree(param->name); + kfree(param); + } + + return NULL; +} + +/* #warning Add extension keys */ +int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) +{ + struct iscsi_param *param = NULL; + struct iscsi_param_list *pl; + + pl = kzalloc(sizeof(struct iscsi_param_list), 
GFP_KERNEL);
+ if (!pl) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param_list.\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&pl->param_list);
+ INIT_LIST_HEAD(&pl->extra_response_list);
+
+ /*
+ * The format for setting the initial parameter definitions is:
+ *
+ * Parameter name:
+ * Initial value:
+ * Allowable phase:
+ * Scope:
+ * Allowable senders:
+ * Typerange:
+ * Use:
+ */
+ param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
+ PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_AUTH, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXCONNECTIONS,
+ INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
+ PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_UTF8, 0);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_ISCSINAME, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORNAME,
+ INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_UTF8, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORALIAS,
+ INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
+ USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETADDRESS,
+ INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_TARGETADDRESS, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
+ INITIAL_TARGETPORTALGROUPTAG,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IMMEDIATEDATA,
+ INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXXMITDATASEGMENTLENGTH,
+ INITIAL_MAXXMITDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
+ INITIAL_MAXRECVDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
+ INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215,
USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH, + INITIAL_FIRSTBURSTLENGTH, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_512_TO_16777215, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT, + INITIAL_DEFAULTTIME2WAIT, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_0_TO_3600, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN, + INITIAL_DEFAULTTIME2RETAIN, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_0_TO_3600, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T, + INITIAL_MAXOUTSTANDINGR2T, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_1_TO_65535, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, DATAPDUINORDER, + INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL, + SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR, + USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, DATASEQUENCEINORDER, + INITIAL_DATASEQUENCEINORDER, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_BOOL_OR, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL, + INITIAL_ERRORRECOVERYLEVEL, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_0_TO_2, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE, + PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR, + TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_BOOL_AND, USE_INITIAL_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_BOOL_AND, USE_INITIAL_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_UTF8, USE_INITIAL_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_UTF8, USE_INITIAL_ONLY); + if (!param) + goto out; + + /* + * Extra parameters for ISER from RFC-5046 + */ + param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, + PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, + TYPERANGE_BOOL_AND, USE_LEADING_ONLY); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH, + INITIAL_INITIATORRECVDATASEGMENTLENGTH, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_512_TO_16777215, USE_ALL); + if (!param) + goto out; + + param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH, + INITIAL_TARGETRECVDATASEGMENTLENGTH, + PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH, + TYPERANGE_512_TO_16777215, USE_ALL); + if (!param) + goto out; + + *param_list_ptr = pl; + return 0; +out: + iscsi_release_param_list(pl); + return -1; +} + +int iscsi_set_keys_to_negotiate( + struct iscsi_param_list *param_list, + bool iser) +{ + struct iscsi_param *param; + + param_list->iser = iser; + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + param->state = 0; + if (!strcmp(param->name, 
AUTHMETHOD)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, HEADERDIGEST)) { + if (!iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, DATADIGEST)) { + if (!iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, MAXCONNECTIONS)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, TARGETNAME)) { + continue; + } else if (!strcmp(param->name, INITIATORNAME)) { + continue; + } else if (!strcmp(param->name, TARGETALIAS)) { + if (param->value) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, INITIATORALIAS)) { + continue; + } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, INITIALR2T)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, IMMEDIATEDATA)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { + if (!iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { + continue; + } else if (!strcmp(param->name, MAXBURSTLENGTH)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, DATAPDUINORDER)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, DATASEQUENCEINORDER)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, SESSIONTYPE)) { + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, IFMARKER)) { + SET_PSTATE_REJECT(param); + } else if (!strcmp(param->name, OFMARKER)) { + SET_PSTATE_REJECT(param); + } else if (!strcmp(param->name, IFMARKINT)) { + SET_PSTATE_REJECT(param); + } else if (!strcmp(param->name, OFMARKINT)) { + SET_PSTATE_REJECT(param); + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { + if (iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { + if (iser) + SET_PSTATE_NEGOTIATE(param); + } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { + if (iser) + SET_PSTATE_NEGOTIATE(param); + } + } + + return 0; +} + +int iscsi_set_keys_irrelevant_for_discovery( + struct iscsi_param_list *param_list) +{ + struct iscsi_param *param; + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (!strcmp(param->name, MAXCONNECTIONS)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, INITIALR2T)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, IMMEDIATEDATA)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, MAXBURSTLENGTH)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, FIRSTBURSTLENGTH)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, DATAPDUINORDER)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, DATASEQUENCEINORDER)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, DEFAULTTIME2WAIT)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, 
DEFAULTTIME2RETAIN)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, IFMARKER)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, OFMARKER)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, IFMARKINT)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, OFMARKINT)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, RDMAEXTENSIONS)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) + param->state &= ~PSTATE_NEGOTIATE; + else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) + param->state &= ~PSTATE_NEGOTIATE; + } + + return 0; +} + +int iscsi_copy_param_list( + struct iscsi_param_list **dst_param_list, + struct iscsi_param_list *src_param_list, + int leading) +{ + struct iscsi_param *param = NULL; + struct iscsi_param *new_param = NULL; + struct iscsi_param_list *param_list = NULL; + + param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); + if (!param_list) { + pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(¶m_list->param_list); + INIT_LIST_HEAD(¶m_list->extra_response_list); + + list_for_each_entry(param, &src_param_list->param_list, p_list) { + if (!leading && (param->scope & SCOPE_SESSION_WIDE)) { + if ((strcmp(param->name, "TargetName") != 0) && + (strcmp(param->name, "InitiatorName") != 0) && + (strcmp(param->name, "TargetPortalGroupTag") != 0)) + continue; + } + + new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); + if (!new_param) { + pr_err("Unable to allocate memory for struct iscsi_param.\n"); + goto err_out; + } + + new_param->name = kstrdup(param->name, GFP_KERNEL); + new_param->value = kstrdup(param->value, GFP_KERNEL); + if (!new_param->value || !new_param->name) { + kfree(new_param->value); + kfree(new_param->name); + kfree(new_param); + pr_err("Unable to allocate memory for parameter name/value.\n"); + goto err_out; + } + + new_param->set_param = param->set_param; + new_param->phase = param->phase; + new_param->scope = param->scope; + new_param->sender = param->sender; + new_param->type = param->type; + new_param->use = param->use; + new_param->type_range = param->type_range; + + list_add_tail(&new_param->p_list, ¶m_list->param_list); + } + + if (!list_empty(¶m_list->param_list)) { + *dst_param_list = param_list; + } else { + pr_err("No parameters allocated.\n"); + goto err_out; + } + + return 0; + +err_out: + iscsi_release_param_list(param_list); + return -ENOMEM; +} + +static void iscsi_release_extra_responses(struct iscsi_param_list *param_list) +{ + struct iscsi_extra_response *er, *er_tmp; + + list_for_each_entry_safe(er, er_tmp, ¶m_list->extra_response_list, + er_list) { + list_del(&er->er_list); + kfree(er); + } +} + +void iscsi_release_param_list(struct iscsi_param_list *param_list) +{ + struct iscsi_param *param, *param_tmp; + + list_for_each_entry_safe(param, param_tmp, ¶m_list->param_list, + p_list) { + list_del(¶m->p_list); + + kfree(param->name); + kfree(param->value); + kfree(param); + } + + iscsi_release_extra_responses(param_list); + + kfree(param_list); +} + +struct iscsi_param *iscsi_find_param_from_key( + char *key, + struct iscsi_param_list *param_list) +{ + struct iscsi_param *param; + + if (!key || !param_list) { + pr_err("Key or parameter list pointer is NULL.\n"); + return NULL; + } + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (!strcmp(key, param->name)) + return param; + } + + pr_err("Unable to 
locate key \"%s\".\n", key); + return NULL; +} +EXPORT_SYMBOL(iscsi_find_param_from_key); + +int iscsi_extract_key_value(char *textbuf, char **key, char **value) +{ + *value = strchr(textbuf, '='); + if (!*value) { + pr_err("Unable to locate \"=\" separator for key," + " ignoring request.\n"); + return -1; + } + + *key = textbuf; + **value = '\0'; + *value = *value + 1; + + return 0; +} + +int iscsi_update_param_value(struct iscsi_param *param, char *value) +{ + kfree(param->value); + + param->value = kstrdup(value, GFP_KERNEL); + if (!param->value) { + pr_err("Unable to allocate memory for value.\n"); + return -ENOMEM; + } + + pr_debug("iSCSI Parameter updated to %s=%s\n", + param->name, param->value); + return 0; +} + +static int iscsi_add_notunderstood_response( + char *key, + char *value, + struct iscsi_param_list *param_list) +{ + struct iscsi_extra_response *extra_response; + + if (strlen(value) > VALUE_MAXLEN) { + pr_err("Value for notunderstood key \"%s\" exceeds %d," + " protocol error.\n", key, VALUE_MAXLEN); + return -1; + } + + extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL); + if (!extra_response) { + pr_err("Unable to allocate memory for" + " struct iscsi_extra_response.\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&extra_response->er_list); + + strscpy(extra_response->key, key, sizeof(extra_response->key)); + strscpy(extra_response->value, NOTUNDERSTOOD, + sizeof(extra_response->value)); + + list_add_tail(&extra_response->er_list, + ¶m_list->extra_response_list); + return 0; +} + +static int iscsi_check_for_auth_key(char *key) +{ + /* + * RFC 1994 + */ + if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") || + !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") || + !strcmp(key, "CHAP_R")) + return 1; + + /* + * RFC 2945 + */ + if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") || + !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") || + !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") || + !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM")) + return 1; + + return 0; +} + +static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param, + bool keys_workaround) +{ + if (IS_TYPE_BOOL_AND(param)) { + if (!strcmp(param->value, NO)) + SET_PSTATE_REPLY_OPTIONAL(param); + } else if (IS_TYPE_BOOL_OR(param)) { + if (!strcmp(param->value, YES)) + SET_PSTATE_REPLY_OPTIONAL(param); + + if (keys_workaround) { + /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, IMMEDIATEDATA)) + SET_PSTATE_REPLY_OPTIONAL(param); + } + } else if (IS_TYPE_NUMBER(param)) { + if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); + + if (keys_workaround) { + /* + * Required for Mellanox Flexboot PXE boot ROM + */ + if (!strcmp(param->name, FIRSTBURSTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); + + /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, MAXCONNECTIONS)) + SET_PSTATE_REPLY_OPTIONAL(param); + } + } else if (IS_PHASE_DECLARATIVE(param)) + SET_PSTATE_REPLY_OPTIONAL(param); +} + +static int iscsi_check_boolean_value(struct iscsi_param *param, char *value) +{ + if (strcmp(value, YES) && strcmp(value, NO)) { + pr_err("Illegal value for \"%s\", must be either" + " \"%s\" or \"%s\".\n", param->name, YES, NO); + return -1; + } + + return 0; +} + +static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr) +{ + char *tmpptr; + int value = 0; + + value = simple_strtoul(value_ptr, &tmpptr, 0); + + if (IS_TYPERANGE_0_TO_2(param)) { + if ((value < 0) || (value > 2)) { + 
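+ /* e.g. an ErrorRecoveryLevel offer of "3" fails this 0..2 bound */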
pr_err("Illegal value for \"%s\", must be" + " between 0 and 2.\n", param->name); + return -1; + } + return 0; + } + if (IS_TYPERANGE_0_TO_3600(param)) { + if ((value < 0) || (value > 3600)) { + pr_err("Illegal value for \"%s\", must be" + " between 0 and 3600.\n", param->name); + return -1; + } + return 0; + } + if (IS_TYPERANGE_0_TO_32767(param)) { + if ((value < 0) || (value > 32767)) { + pr_err("Illegal value for \"%s\", must be" + " between 0 and 32767.\n", param->name); + return -1; + } + return 0; + } + if (IS_TYPERANGE_0_TO_65535(param)) { + if ((value < 0) || (value > 65535)) { + pr_err("Illegal value for \"%s\", must be" + " between 0 and 65535.\n", param->name); + return -1; + } + return 0; + } + if (IS_TYPERANGE_1_TO_65535(param)) { + if ((value < 1) || (value > 65535)) { + pr_err("Illegal value for \"%s\", must be" + " between 1 and 65535.\n", param->name); + return -1; + } + return 0; + } + if (IS_TYPERANGE_2_TO_3600(param)) { + if ((value < 2) || (value > 3600)) { + pr_err("Illegal value for \"%s\", must be" + " between 2 and 3600.\n", param->name); + return -1; + } + return 0; + } + if (IS_TYPERANGE_512_TO_16777215(param)) { + if ((value < 512) || (value > 16777215)) { + pr_err("Illegal value for \"%s\", must be" + " between 512 and 16777215.\n", param->name); + return -1; + } + return 0; + } + + return 0; +} + +static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value) +{ + if (IS_PSTATE_PROPOSER(param)) + return 0; + + if (IS_TYPERANGE_AUTH_PARAM(param)) { + if (strcmp(value, KRB5) && strcmp(value, SPKM1) && + strcmp(value, SPKM2) && strcmp(value, SRP) && + strcmp(value, CHAP) && strcmp(value, NONE)) { + pr_err("Illegal value for \"%s\", must be" + " \"%s\", \"%s\", \"%s\", \"%s\", \"%s\"" + " or \"%s\".\n", param->name, KRB5, + SPKM1, SPKM2, SRP, CHAP, NONE); + return -1; + } + } + if (IS_TYPERANGE_DIGEST_PARAM(param)) { + if (strcmp(value, CRC32C) && strcmp(value, NONE)) { + pr_err("Illegal value for \"%s\", must be" + " \"%s\" or \"%s\".\n", param->name, + CRC32C, NONE); + return -1; + } + } + if (IS_TYPERANGE_SESSIONTYPE(param)) { + if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) { + pr_err("Illegal value for \"%s\", must be" + " \"%s\" or \"%s\".\n", param->name, + DISCOVERY, NORMAL); + return -1; + } + } + + return 0; +} + +static char *iscsi_check_valuelist_for_support( + struct iscsi_param *param, + char *value) +{ + char *tmp1 = NULL, *tmp2 = NULL; + char *acceptor_values = NULL, *proposer_values = NULL; + + acceptor_values = param->value; + proposer_values = value; + + do { + if (!proposer_values) + return NULL; + tmp1 = strchr(proposer_values, ','); + if (tmp1) + *tmp1 = '\0'; + acceptor_values = param->value; + do { + if (!acceptor_values) { + if (tmp1) + *tmp1 = ','; + return NULL; + } + tmp2 = strchr(acceptor_values, ','); + if (tmp2) + *tmp2 = '\0'; + if (!strcmp(acceptor_values, proposer_values)) { + if (tmp2) + *tmp2 = ','; + goto out; + } + if (tmp2) + *tmp2++ = ','; + + acceptor_values = tmp2; + } while (acceptor_values); + if (tmp1) + *tmp1++ = ','; + proposer_values = tmp1; + } while (proposer_values); + +out: + return proposer_values; +} + +static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value, + struct iscsit_conn *conn) +{ + u8 acceptor_boolean_value = 0, proposer_boolean_value = 0; + char *negotiated_value = NULL; + + if (IS_PSTATE_ACCEPTOR(param)) { + pr_err("Received key \"%s\" twice, protocol error.\n", + param->name); + return -1; + } + + if (IS_PSTATE_REJECT(param)) + return 0; + + if 
(IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (!(acceptor_boolean_value && proposer_boolean_value)) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ if (!proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (acceptor_boolean_value || proposer_boolean_value) {
+ if (iscsi_update_param_value(param, YES) < 0)
+ return -1;
+ if (proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_NUMBER(param)) {
+ char *tmpptr, buf[11];
+ u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
+ u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
+
+ memset(buf, 0, sizeof(buf));
+
+ if (!strcmp(param->name, MAXCONNECTIONS) ||
+ !strcmp(param->name, MAXBURSTLENGTH) ||
+ !strcmp(param->name, FIRSTBURSTLENGTH) ||
+ !strcmp(param->name, MAXOUTSTANDINGR2T) ||
+ !strcmp(param->name, DEFAULTTIME2RETAIN) ||
+ !strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ if (proposer_value > acceptor_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ if (acceptor_value > proposer_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ struct iscsi_param *param_mxdsl;
+ unsigned long long tmp;
+ int rc;
+
+ rc = kstrtoull(param->value, 0, &tmp);
+ if (rc < 0)
+ return -1;
+
+ conn->conn_ops->MaxRecvDataSegmentLength = tmp;
+ pr_debug("Saving op->MaxRecvDataSegmentLength from"
+ " original initiator received value: %u\n",
+ conn->conn_ops->MaxRecvDataSegmentLength);
+
+ param_mxdsl = iscsi_find_param_from_key(
+ MAXXMITDATASEGMENTLENGTH,
+ conn->param_list);
+ if (!param_mxdsl)
+ return -1;
+
+ rc = iscsi_update_param_value(param,
+ param_mxdsl->value);
+ if (rc < 0)
+ return -1;
+
+ pr_debug("Updated %s to target MXDSL value: %s\n",
+ param->name, param->value);
+ }
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ negotiated_value = iscsi_check_valuelist_for_support(
+ param, value);
+ if (!negotiated_value) {
+ pr_err("Proposer's value list \"%s\" contains"
+ " no valid values from Acceptor's value list"
+ " \"%s\".\n", value, param->value);
+ return -1;
+ }
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_PHASE_DECLARATIVE(param)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+
+ return 0;
+}
+
+static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_RESPONSE_GOT(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
+ if (IS_TYPE_VALUE_LIST(param)) {
+ char *comma_ptr = NULL, *tmp_ptr = NULL;
+
+ comma_ptr = strchr(value, ',');
+ if (comma_ptr) {
+ pr_err("Illegal \",\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+
+ tmp_ptr = iscsi_check_valuelist_for_support(param, value);
+ if (!tmp_ptr)
+ return -1;
+ }
+
+ if
(iscsi_update_param_value(param, value) < 0) + return -1; + + return 0; +} + +static int iscsi_check_value(struct iscsi_param *param, char *value) +{ + char *comma_ptr = NULL; + + if (!strcmp(value, REJECT)) { + if (!strcmp(param->name, IFMARKINT) || + !strcmp(param->name, OFMARKINT)) { + /* + * Reject is not fatal for [I,O]FMarkInt, and causes + * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2) + */ + SET_PSTATE_REJECT(param); + return 0; + } + pr_err("Received %s=%s\n", param->name, value); + return -1; + } + if (!strcmp(value, IRRELEVANT)) { + pr_debug("Received %s=%s\n", param->name, value); + SET_PSTATE_IRRELEVANT(param); + return 0; + } + if (!strcmp(value, NOTUNDERSTOOD)) { + if (!IS_PSTATE_PROPOSER(param)) { + pr_err("Received illegal offer %s=%s\n", + param->name, value); + return -1; + } + +/* #warning FIXME: Add check for X-ExtensionKey here */ + pr_err("Standard iSCSI key \"%s\" cannot be answered" + " with \"%s\", protocol error.\n", param->name, value); + return -1; + } + + do { + comma_ptr = NULL; + comma_ptr = strchr(value, ','); + + if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) { + pr_err("Detected value separator \",\", but" + " key \"%s\" does not allow a value list," + " protocol error.\n", param->name); + return -1; + } + if (comma_ptr) + *comma_ptr = '\0'; + + if (strlen(value) > VALUE_MAXLEN) { + pr_err("Value for key \"%s\" exceeds %d," + " protocol error.\n", param->name, + VALUE_MAXLEN); + return -1; + } + + if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) { + if (iscsi_check_boolean_value(param, value) < 0) + return -1; + } else if (IS_TYPE_NUMBER(param)) { + if (iscsi_check_numerical_value(param, value) < 0) + return -1; + } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) { + if (iscsi_check_string_or_list_value(param, value) < 0) + return -1; + } else { + pr_err("Huh? 0x%02x\n", param->type); + return -1; + } + + if (comma_ptr) + *comma_ptr++ = ','; + + value = comma_ptr; + } while (value); + + return 0; +} + +static struct iscsi_param *__iscsi_check_key( + char *key, + int sender, + struct iscsi_param_list *param_list) +{ + struct iscsi_param *param; + + if (strlen(key) > KEY_MAXLEN) { + pr_err("Length of key name \"%s\" exceeds %d.\n", + key, KEY_MAXLEN); + return NULL; + } + + param = iscsi_find_param_from_key(key, param_list); + if (!param) + return NULL; + + if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) { + pr_err("Key \"%s\" may not be sent to %s," + " protocol error.\n", param->name, + (sender & SENDER_RECEIVER) ? "target" : "initiator"); + return NULL; + } + + if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) { + pr_err("Key \"%s\" may not be sent to %s," + " protocol error.\n", param->name, + (sender & SENDER_RECEIVER) ? "initiator" : "target"); + return NULL; + } + + return param; +} + +static struct iscsi_param *iscsi_check_key( + char *key, + int phase, + int sender, + struct iscsi_param_list *param_list) +{ + struct iscsi_param *param; + /* + * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1) + */ + if (strlen(key) > KEY_MAXLEN) { + pr_err("Length of key name \"%s\" exceeds %d.\n", + key, KEY_MAXLEN); + return NULL; + } + + param = iscsi_find_param_from_key(key, param_list); + if (!param) + return NULL; + + if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) { + pr_err("Key \"%s\" may not be sent to %s," + " protocol error.\n", param->name, + (sender & SENDER_RECEIVER) ? 
"target" : "initiator"); + return NULL; + } + if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) { + pr_err("Key \"%s\" may not be sent to %s," + " protocol error.\n", param->name, + (sender & SENDER_RECEIVER) ? "initiator" : "target"); + return NULL; + } + + if (IS_PSTATE_ACCEPTOR(param)) { + pr_err("Key \"%s\" received twice, protocol error.\n", + key); + return NULL; + } + + if (!phase) + return param; + + if (!(param->phase & phase)) { + char *phase_name; + + switch (phase) { + case PHASE_SECURITY: + phase_name = "Security"; + break; + case PHASE_OPERATIONAL: + phase_name = "Operational"; + break; + default: + phase_name = "Unknown"; + } + pr_err("Key \"%s\" may not be negotiated during %s phase.\n", + param->name, phase_name); + return NULL; + } + + return param; +} + +static int iscsi_enforce_integrity_rules( + u8 phase, + struct iscsi_param_list *param_list) +{ + char *tmpptr; + u8 DataSequenceInOrder = 0; + u8 ErrorRecoveryLevel = 0, SessionType = 0; + u32 FirstBurstLength = 0, MaxBurstLength = 0; + struct iscsi_param *param = NULL; + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (!(param->phase & phase)) + continue; + if (!strcmp(param->name, SESSIONTYPE)) + if (!strcmp(param->value, NORMAL)) + SessionType = 1; + if (!strcmp(param->name, ERRORRECOVERYLEVEL)) + ErrorRecoveryLevel = simple_strtoul(param->value, + &tmpptr, 0); + if (!strcmp(param->name, DATASEQUENCEINORDER)) + if (!strcmp(param->value, YES)) + DataSequenceInOrder = 1; + if (!strcmp(param->name, MAXBURSTLENGTH)) + MaxBurstLength = simple_strtoul(param->value, + &tmpptr, 0); + } + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (!(param->phase & phase)) + continue; + if (!SessionType && !IS_PSTATE_ACCEPTOR(param)) + continue; + if (!strcmp(param->name, MAXOUTSTANDINGR2T) && + DataSequenceInOrder && (ErrorRecoveryLevel > 0)) { + if (strcmp(param->value, "1")) { + if (iscsi_update_param_value(param, "1") < 0) + return -1; + pr_debug("Reset \"%s\" to \"%s\".\n", + param->name, param->value); + } + } + if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) { + if (strcmp(param->value, "1")) { + if (iscsi_update_param_value(param, "1") < 0) + return -1; + pr_debug("Reset \"%s\" to \"%s\".\n", + param->name, param->value); + } + } + if (!strcmp(param->name, FIRSTBURSTLENGTH)) { + FirstBurstLength = simple_strtoul(param->value, + &tmpptr, 0); + if (FirstBurstLength > MaxBurstLength) { + char tmpbuf[11]; + memset(tmpbuf, 0, sizeof(tmpbuf)); + sprintf(tmpbuf, "%u", MaxBurstLength); + if (iscsi_update_param_value(param, tmpbuf)) + return -1; + pr_debug("Reset \"%s\" to \"%s\".\n", + param->name, param->value); + } + } + } + + return 0; +} + +int iscsi_decode_text_input( + u8 phase, + u8 sender, + char *textbuf, + u32 length, + struct iscsit_conn *conn) +{ + struct iscsi_param_list *param_list = conn->param_list; + char *tmpbuf, *start = NULL, *end = NULL; + + tmpbuf = kmemdup_nul(textbuf, length, GFP_KERNEL); + if (!tmpbuf) { + pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length); + return -ENOMEM; + } + + start = tmpbuf; + end = (start + length); + + while (start < end) { + char *key, *value; + struct iscsi_param *param; + + if (iscsi_extract_key_value(start, &key, &value) < 0) + goto free_buffer; + + pr_debug("Got key: %s=%s\n", key, value); + + if (phase & PHASE_SECURITY) { + if (iscsi_check_for_auth_key(key) > 0) { + kfree(tmpbuf); + return 1; + } + } + + param = iscsi_check_key(key, phase, sender, param_list); + if (!param) { + if 
(iscsi_add_notunderstood_response(key, value, + param_list) < 0) + goto free_buffer; + + start += strlen(key) + strlen(value) + 2; + continue; + } + if (iscsi_check_value(param, value) < 0) + goto free_buffer; + + start += strlen(key) + strlen(value) + 2; + + if (IS_PSTATE_PROPOSER(param)) { + if (iscsi_check_proposer_state(param, value) < 0) + goto free_buffer; + + SET_PSTATE_RESPONSE_GOT(param); + } else { + if (iscsi_check_acceptor_state(param, value, conn) < 0) + goto free_buffer; + + SET_PSTATE_ACCEPTOR(param); + } + } + + kfree(tmpbuf); + return 0; + +free_buffer: + kfree(tmpbuf); + return -1; +} + +int iscsi_encode_text_output( + u8 phase, + u8 sender, + char *textbuf, + u32 *length, + struct iscsi_param_list *param_list, + bool keys_workaround) +{ + char *output_buf = NULL; + struct iscsi_extra_response *er; + struct iscsi_param *param; + + output_buf = textbuf + *length; + + if (iscsi_enforce_integrity_rules(phase, param_list) < 0) + return -1; + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (!(param->sender & sender)) + continue; + if (IS_PSTATE_ACCEPTOR(param) && + !IS_PSTATE_RESPONSE_SENT(param) && + !IS_PSTATE_REPLY_OPTIONAL(param) && + (param->phase & phase)) { + *length += sprintf(output_buf, "%s=%s", + param->name, param->value); + *length += 1; + output_buf = textbuf + *length; + SET_PSTATE_RESPONSE_SENT(param); + pr_debug("Sending key: %s=%s\n", + param->name, param->value); + continue; + } + if (IS_PSTATE_NEGOTIATE(param) && + !IS_PSTATE_ACCEPTOR(param) && + !IS_PSTATE_PROPOSER(param) && + (param->phase & phase)) { + *length += sprintf(output_buf, "%s=%s", + param->name, param->value); + *length += 1; + output_buf = textbuf + *length; + SET_PSTATE_PROPOSER(param); + iscsi_check_proposer_for_optional_reply(param, + keys_workaround); + pr_debug("Sending key: %s=%s\n", + param->name, param->value); + } + } + + list_for_each_entry(er, ¶m_list->extra_response_list, er_list) { + *length += sprintf(output_buf, "%s=%s", er->key, er->value); + *length += 1; + output_buf = textbuf + *length; + pr_debug("Sending key: %s=%s\n", er->key, er->value); + } + iscsi_release_extra_responses(param_list); + + return 0; +} + +int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list) +{ + int ret = 0; + struct iscsi_param *param; + + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (IS_PSTATE_NEGOTIATE(param) && + IS_PSTATE_PROPOSER(param) && + !IS_PSTATE_RESPONSE_GOT(param) && + !IS_PSTATE_REPLY_OPTIONAL(param) && + !IS_PHASE_DECLARATIVE(param)) { + pr_err("No response for proposed key \"%s\".\n", + param->name); + ret = -1; + } + } + + return ret; +} + +int iscsi_change_param_value( + char *keyvalue, + struct iscsi_param_list *param_list, + int check_key) +{ + char *key = NULL, *value = NULL; + struct iscsi_param *param; + int sender = 0; + + if (iscsi_extract_key_value(keyvalue, &key, &value) < 0) + return -1; + + if (!check_key) { + param = __iscsi_check_key(keyvalue, sender, param_list); + if (!param) + return -1; + } else { + param = iscsi_check_key(keyvalue, 0, sender, param_list); + if (!param) + return -1; + + param->set_param = 1; + if (iscsi_check_value(param, value) < 0) { + param->set_param = 0; + return -1; + } + param->set_param = 0; + } + + if (iscsi_update_param_value(param, value) < 0) + return -1; + + return 0; +} + +void iscsi_set_connection_parameters( + struct iscsi_conn_ops *ops, + struct iscsi_param_list *param_list) +{ + char *tmpptr; + struct iscsi_param *param; + + 
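+ /*
+ * Convert the negotiated key=value strings into the binary
+ * per-connection settings in iscsi_conn_ops; only keys that reached
+ * ACCEPTOR or PROPOSER state are applied, apart from the local
+ * MaxXmitDataSegmentLength special case noted below.
+ */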
pr_debug("---------------------------------------------------" + "---------------\n"); + list_for_each_entry(param, ¶m_list->param_list, p_list) { + /* + * Special case to set MAXXMITDATASEGMENTLENGTH from the + * target requested MaxRecvDataSegmentLength, even though + * this key is not sent over the wire. + */ + if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { + ops->MaxXmitDataSegmentLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("MaxXmitDataSegmentLength: %s\n", + param->value); + } + + if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param)) + continue; + if (!strcmp(param->name, AUTHMETHOD)) { + pr_debug("AuthMethod: %s\n", + param->value); + } else if (!strcmp(param->name, HEADERDIGEST)) { + ops->HeaderDigest = !strcmp(param->value, CRC32C); + pr_debug("HeaderDigest: %s\n", + param->value); + } else if (!strcmp(param->name, DATADIGEST)) { + ops->DataDigest = !strcmp(param->value, CRC32C); + pr_debug("DataDigest: %s\n", + param->value); + } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { + /* + * At this point iscsi_check_acceptor_state() will have + * set ops->MaxRecvDataSegmentLength from the original + * initiator provided value. + */ + pr_debug("MaxRecvDataSegmentLength: %u\n", + ops->MaxRecvDataSegmentLength); + } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { + ops->InitiatorRecvDataSegmentLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("InitiatorRecvDataSegmentLength: %s\n", + param->value); + ops->MaxRecvDataSegmentLength = + ops->InitiatorRecvDataSegmentLength; + pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n"); + } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { + ops->TargetRecvDataSegmentLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("TargetRecvDataSegmentLength: %s\n", + param->value); + ops->MaxXmitDataSegmentLength = + ops->TargetRecvDataSegmentLength; + pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n"); + } + } + pr_debug("----------------------------------------------------" + "--------------\n"); +} + +void iscsi_set_session_parameters( + struct iscsi_sess_ops *ops, + struct iscsi_param_list *param_list, + int leading) +{ + char *tmpptr; + struct iscsi_param *param; + + pr_debug("----------------------------------------------------" + "--------------\n"); + list_for_each_entry(param, ¶m_list->param_list, p_list) { + if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param)) + continue; + if (!strcmp(param->name, INITIATORNAME)) { + if (!param->value) + continue; + if (leading) + snprintf(ops->InitiatorName, + sizeof(ops->InitiatorName), + "%s", param->value); + pr_debug("InitiatorName: %s\n", + param->value); + } else if (!strcmp(param->name, INITIATORALIAS)) { + if (!param->value) + continue; + snprintf(ops->InitiatorAlias, + sizeof(ops->InitiatorAlias), + "%s", param->value); + pr_debug("InitiatorAlias: %s\n", + param->value); + } else if (!strcmp(param->name, TARGETNAME)) { + if (!param->value) + continue; + if (leading) + snprintf(ops->TargetName, + sizeof(ops->TargetName), + "%s", param->value); + pr_debug("TargetName: %s\n", + param->value); + } else if (!strcmp(param->name, TARGETALIAS)) { + if (!param->value) + continue; + snprintf(ops->TargetAlias, sizeof(ops->TargetAlias), + "%s", param->value); + pr_debug("TargetAlias: %s\n", + param->value); + } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) { + ops->TargetPortalGroupTag = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("TargetPortalGroupTag: %s\n", + param->value); + } 
else if (!strcmp(param->name, MAXCONNECTIONS)) { + ops->MaxConnections = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("MaxConnections: %s\n", + param->value); + } else if (!strcmp(param->name, INITIALR2T)) { + ops->InitialR2T = !strcmp(param->value, YES); + pr_debug("InitialR2T: %s\n", + param->value); + } else if (!strcmp(param->name, IMMEDIATEDATA)) { + ops->ImmediateData = !strcmp(param->value, YES); + pr_debug("ImmediateData: %s\n", + param->value); + } else if (!strcmp(param->name, MAXBURSTLENGTH)) { + ops->MaxBurstLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("MaxBurstLength: %s\n", + param->value); + } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) { + ops->FirstBurstLength = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("FirstBurstLength: %s\n", + param->value); + } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) { + ops->DefaultTime2Wait = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("DefaultTime2Wait: %s\n", + param->value); + } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) { + ops->DefaultTime2Retain = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("DefaultTime2Retain: %s\n", + param->value); + } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) { + ops->MaxOutstandingR2T = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("MaxOutstandingR2T: %s\n", + param->value); + } else if (!strcmp(param->name, DATAPDUINORDER)) { + ops->DataPDUInOrder = !strcmp(param->value, YES); + pr_debug("DataPDUInOrder: %s\n", + param->value); + } else if (!strcmp(param->name, DATASEQUENCEINORDER)) { + ops->DataSequenceInOrder = !strcmp(param->value, YES); + pr_debug("DataSequenceInOrder: %s\n", + param->value); + } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) { + ops->ErrorRecoveryLevel = + simple_strtoul(param->value, &tmpptr, 0); + pr_debug("ErrorRecoveryLevel: %s\n", + param->value); + } else if (!strcmp(param->name, SESSIONTYPE)) { + ops->SessionType = !strcmp(param->value, DISCOVERY); + pr_debug("SessionType: %s\n", + param->value); + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { + ops->RDMAExtensions = !strcmp(param->value, YES); + pr_debug("RDMAExtensions: %s\n", + param->value); + } + } + pr_debug("----------------------------------------------------" + "--------------\n"); + +} diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h new file mode 100644 index 0000000000..00fbbebb8c --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_parameters.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_PARAMETERS_H +#define ISCSI_PARAMETERS_H + +#include <linux/types.h> +#include <scsi/iscsi_proto.h> + +struct iscsi_extra_response { + char key[KEY_MAXLEN]; + char value[32]; + struct list_head er_list; +} ____cacheline_aligned; + +struct iscsi_param { + char *name; + char *value; + u8 set_param; + u8 phase; + u8 scope; + u8 sender; + u8 type; + u8 use; + u16 type_range; + u32 state; + struct list_head p_list; +} ____cacheline_aligned; + +struct iscsit_conn; +struct iscsi_conn_ops; +struct iscsi_param_list; +struct iscsi_sess_ops; + +extern int iscsi_login_rx_data(struct iscsit_conn *, char *, int); +extern int iscsi_login_tx_data(struct iscsit_conn *, char *, char *, int); +extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *); +extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *); +extern void iscsi_print_params(struct iscsi_param_list *); +extern int iscsi_create_default_params(struct iscsi_param_list **); +extern int 
iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
+extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
+extern int iscsi_copy_param_list(struct iscsi_param_list **,
+ struct iscsi_param_list *, int);
+extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
+extern void iscsi_release_param_list(struct iscsi_param_list *);
+extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
+extern int iscsi_extract_key_value(char *, char **, char **);
+extern int iscsi_update_param_value(struct iscsi_param *, char *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsit_conn *);
+extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
+ struct iscsi_param_list *, bool);
+extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
+extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
+ struct iscsi_param_list *);
+extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
+ struct iscsi_param_list *, int);
+
+#define YES "Yes"
+#define NO "No"
+#define ALL "All"
+#define IRRELEVANT "Irrelevant"
+#define NONE "None"
+#define NOTUNDERSTOOD "NotUnderstood"
+#define REJECT "Reject"
+
+/*
+ * The Parameter Names.
+ */
+#define AUTHMETHOD "AuthMethod"
+#define HEADERDIGEST "HeaderDigest"
+#define DATADIGEST "DataDigest"
+#define MAXCONNECTIONS "MaxConnections"
+#define SENDTARGETS "SendTargets"
+#define TARGETNAME "TargetName"
+#define INITIATORNAME "InitiatorName"
+#define TARGETALIAS "TargetAlias"
+#define INITIATORALIAS "InitiatorAlias"
+#define TARGETADDRESS "TargetAddress"
+#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
+#define INITIALR2T "InitialR2T"
+#define IMMEDIATEDATA "ImmediateData"
+#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
+#define MAXXMITDATASEGMENTLENGTH "MaxXmitDataSegmentLength"
+#define MAXBURSTLENGTH "MaxBurstLength"
+#define FIRSTBURSTLENGTH "FirstBurstLength"
+#define DEFAULTTIME2WAIT "DefaultTime2Wait"
+#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
+#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
+#define DATAPDUINORDER "DataPDUInOrder"
+#define DATASEQUENCEINORDER "DataSequenceInOrder"
+#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
+#define SESSIONTYPE "SessionType"
+#define IFMARKER "IFMarker"
+#define OFMARKER "OFMarker"
+#define IFMARKINT "IFMarkInt"
+#define OFMARKINT "OFMarkInt"
+
+/*
+ * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
+ */
+#define RDMAEXTENSIONS "RDMAExtensions"
+#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
+#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
+
+/*
+ * For AuthMethod.
+ */
+#define KRB5 "KRB5"
+#define SPKM1 "SPKM1"
+#define SPKM2 "SPKM2"
+#define SRP "SRP"
+#define CHAP "CHAP"
+
+/*
+ * Initial values for Parameter Negotiation.
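+ *
+ * These strings seed iscsi_create_default_params(); value-list keys such
+ * as HeaderDigest carry every supported option, and negotiation narrows
+ * them to a single value via iscsi_check_valuelist_for_support().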
+ */ +#define INITIAL_AUTHMETHOD CHAP +#define INITIAL_HEADERDIGEST "CRC32C,None" +#define INITIAL_DATADIGEST "CRC32C,None" +#define INITIAL_MAXCONNECTIONS "1" +#define INITIAL_SENDTARGETS ALL +#define INITIAL_TARGETNAME "LIO.Target" +#define INITIAL_INITIATORNAME "LIO.Initiator" +#define INITIAL_TARGETALIAS "LIO Target" +#define INITIAL_INITIATORALIAS "LIO Initiator" +#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0" +#define INITIAL_TARGETPORTALGROUPTAG "1" +#define INITIAL_INITIALR2T YES +#define INITIAL_IMMEDIATEDATA YES +#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192" +/* + * Match outgoing MXDSL default to incoming Open-iSCSI default + */ +#define INITIAL_MAXXMITDATASEGMENTLENGTH "262144" +#define INITIAL_MAXBURSTLENGTH "262144" +#define INITIAL_FIRSTBURSTLENGTH "65536" +#define INITIAL_DEFAULTTIME2WAIT "2" +#define INITIAL_DEFAULTTIME2RETAIN "20" +#define INITIAL_MAXOUTSTANDINGR2T "1" +#define INITIAL_DATAPDUINORDER YES +#define INITIAL_DATASEQUENCEINORDER YES +#define INITIAL_ERRORRECOVERYLEVEL "0" +#define INITIAL_SESSIONTYPE NORMAL +#define INITIAL_IFMARKER NO +#define INITIAL_OFMARKER NO +#define INITIAL_IFMARKINT REJECT +#define INITIAL_OFMARKINT REJECT + +/* + * Initial values for iSER parameters following RFC-5046 Section 6 + */ +#define INITIAL_RDMAEXTENSIONS NO +#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" +#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" + +/* + * For [Header,Data]Digests. + */ +#define CRC32C "CRC32C" + +/* + * For SessionType. + */ +#define DISCOVERY "Discovery" +#define NORMAL "Normal" + +/* + * struct iscsi_param->use + */ +#define USE_LEADING_ONLY 0x01 +#define USE_INITIAL_ONLY 0x02 +#define USE_ALL 0x04 + +#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY) +#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY) +#define IS_USE_ALL(p) ((p)->use & USE_ALL) + +#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY) + +/* + * struct iscsi_param->sender + */ +#define SENDER_INITIATOR 0x01 +#define SENDER_TARGET 0x02 +#define SENDER_BOTH 0x03 +/* Used in iscsi_check_key() */ +#define SENDER_RECEIVER 0x04 + +#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR) +#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET) +#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH) + +/* + * struct iscsi_param->scope + */ +#define SCOPE_CONNECTION_ONLY 0x01 +#define SCOPE_SESSION_WIDE 0x02 + +#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY) +#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE) + +/* + * struct iscsi_param->phase + */ +#define PHASE_SECURITY 0x01 +#define PHASE_OPERATIONAL 0x02 +#define PHASE_DECLARATIVE 0x04 +#define PHASE_FFP0 0x08 + +#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY) +#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL) +#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE) +#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0) + +/* + * struct iscsi_param->type + */ +#define TYPE_BOOL_AND 0x01 +#define TYPE_BOOL_OR 0x02 +#define TYPE_NUMBER 0x04 +#define TYPE_NUMBER_RANGE 0x08 +#define TYPE_STRING 0x10 +#define TYPE_VALUE_LIST 0x20 + +#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND) +#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR) +#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER) +#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE) +#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING) +#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST) + +/* + * struct iscsi_param->type_range + */ 
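+/*
+ * The type_range bitmask is consumed by iscsi_set_default_param() to
+ * derive ->type, and by the IS_TYPERANGE_* checks during value
+ * validation; e.g. a key created with TYPERANGE_0_TO_2 becomes
+ * TYPE_NUMBER and only admits the values 0, 1 and 2.
+ */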
+#define TYPERANGE_BOOL_AND 0x0001 +#define TYPERANGE_BOOL_OR 0x0002 +#define TYPERANGE_0_TO_2 0x0004 +#define TYPERANGE_0_TO_3600 0x0008 +#define TYPERANGE_0_TO_32767 0x0010 +#define TYPERANGE_0_TO_65535 0x0020 +#define TYPERANGE_1_TO_65535 0x0040 +#define TYPERANGE_2_TO_3600 0x0080 +#define TYPERANGE_512_TO_16777215 0x0100 +#define TYPERANGE_AUTH 0x0200 +#define TYPERANGE_DIGEST 0x0400 +#define TYPERANGE_ISCSINAME 0x0800 +#define TYPERANGE_SESSIONTYPE 0x1000 +#define TYPERANGE_TARGETADDRESS 0x2000 +#define TYPERANGE_UTF8 0x4000 + +#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2) +#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600) +#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767) +#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535) +#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535) +#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600) +#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \ + TYPERANGE_512_TO_16777215) +#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH) +#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST) +#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \ + TYPERANGE_SESSIONTYPE) + +/* + * struct iscsi_param->state + */ +#define PSTATE_ACCEPTOR 0x01 +#define PSTATE_NEGOTIATE 0x02 +#define PSTATE_PROPOSER 0x04 +#define PSTATE_IRRELEVANT 0x08 +#define PSTATE_REJECT 0x10 +#define PSTATE_REPLY_OPTIONAL 0x20 +#define PSTATE_RESPONSE_GOT 0x40 +#define PSTATE_RESPONSE_SENT 0x80 + +#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR) +#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE) +#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER) +#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT) +#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT) +#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL) +#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT) +#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT) + +#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR) +#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE) +#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER) +#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT) +#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT) +#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL) +#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT) +#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT) + +#endif /* ISCSI_PARAMETERS_H */ diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c new file mode 100644 index 0000000000..66de2b8de4 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c @@ -0,0 +1,690 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains main functions related to iSCSI DataSequenceInOrder=No + * and DataPDUInOrder=No. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org>
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include <target/iscsi/iscsi_target_core.h>
+#include "iscsi_target_util.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_seq_pdu_list.h"
+
+#ifdef DEBUG
+static void iscsit_dump_seq_list(struct iscsit_cmd *cmd)
+{
+ int i;
+ struct iscsi_seq *seq;
+
+ pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+ pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
+ " offset: %d, xfer_len: %d, seq_send_order: %d,"
+ " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
+ seq->offset, seq->xfer_len, seq->seq_send_order,
+ seq->seq_no);
+ }
+}
+
+static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd)
+{
+ int i;
+ struct iscsi_pdu *pdu;
+
+ pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+ pr_debug("i: %d, offset: %d, length: %d,"
+ " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
+ pdu->length, pdu->pdu_send_order, pdu->seq_no);
+ }
+}
+#else
+static void iscsit_dump_seq_list(struct iscsit_cmd *cmd) {}
+static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd) {}
+#endif
+
+static void iscsit_ordered_seq_lists(
+ struct iscsit_cmd *cmd,
+ u8 type)
+{
+ u32 i, seq_count = 0;
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = seq_count++;
+ }
+}
+
+static void iscsit_ordered_pdu_lists(
+ struct iscsit_cmd *cmd,
+ u8 type)
+{
+ u32 i, pdu_send_order = 0, seq_no = 0;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+redo:
+ if (cmd->pdu_list[i].seq_no == seq_no) {
+ cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
+ continue;
+ }
+ seq_no++;
+ pdu_send_order = 0;
+ goto redo;
+ }
+}
+
+/*
+ * Generate count random values into array.
+ * Use 0x80000000 to mark generated values in array[].
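+ * The result is a random permutation of 0..count-1, e.g. {2, 0, 3, 1}
+ * for count == 4, which becomes the PDU/sequence send order.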
+ */ +static void iscsit_create_random_array(u32 *array, u32 count) +{ + int i, j, k; + + if (count == 1) { + array[0] = 0; + return; + } + + for (i = 0; i < count; i++) { +redo: + get_random_bytes(&j, sizeof(u32)); + j = (1 + (int) (9999 + 1) - j) % count; + for (k = 0; k < i + 1; k++) { + j |= 0x80000000; + if ((array[k] & 0x80000000) && (array[k] == j)) + goto redo; + } + array[i] = j; + } + + for (i = 0; i < count; i++) + array[i] &= ~0x80000000; +} + +static int iscsit_randomize_pdu_lists( + struct iscsit_cmd *cmd, + u8 type) +{ + int i = 0; + u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0; + + for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) { +redo: + if (cmd->pdu_list[pdu_count].seq_no == seq_no) { + seq_count++; + continue; + } + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); + if (!array) { + pr_err("Unable to allocate memory" + " for random array.\n"); + return -ENOMEM; + } + iscsit_create_random_array(array, seq_count); + + for (i = 0; i < seq_count; i++) + cmd->pdu_list[seq_offset+i].pdu_send_order = array[i]; + + kfree(array); + + seq_offset += seq_count; + seq_count = 0; + seq_no++; + goto redo; + } + + if (seq_count) { + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); + if (!array) { + pr_err("Unable to allocate memory for" + " random array.\n"); + return -ENOMEM; + } + iscsit_create_random_array(array, seq_count); + + for (i = 0; i < seq_count; i++) + cmd->pdu_list[seq_offset+i].pdu_send_order = array[i]; + + kfree(array); + } + + return 0; +} + +static int iscsit_randomize_seq_lists( + struct iscsit_cmd *cmd, + u8 type) +{ + int i, j = 0; + u32 *array, seq_count = cmd->seq_count; + + if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED)) + seq_count--; + else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED) + seq_count -= 2; + + if (!seq_count) + return 0; + + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); + if (!array) { + pr_err("Unable to allocate memory for random array.\n"); + return -ENOMEM; + } + iscsit_create_random_array(array, seq_count); + + for (i = 0; i < cmd->seq_count; i++) { + if (cmd->seq_list[i].type != SEQTYPE_NORMAL) + continue; + cmd->seq_list[i].seq_send_order = array[j++]; + } + + kfree(array); + return 0; +} + +static void iscsit_determine_counts_for_list( + struct iscsit_cmd *cmd, + struct iscsi_build_list *bl, + u32 *seq_count, + u32 *pdu_count) +{ + int check_immediate = 0; + u32 burstlength = 0, offset = 0; + u32 unsolicited_data_length = 0; + u32 mdsl; + struct iscsit_conn *conn = cmd->conn; + + if (cmd->se_cmd.data_direction == DMA_TO_DEVICE) + mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength; + else + mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength; + + if ((bl->type == PDULIST_IMMEDIATE) || + (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) + check_immediate = 1; + + if ((bl->type == PDULIST_UNSOLICITED) || + (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) + unsolicited_data_length = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); + + while (offset < cmd->se_cmd.data_length) { + *pdu_count += 1; + + if (check_immediate) { + check_immediate = 0; + offset += bl->immediate_data_length; + *seq_count += 1; + if (unsolicited_data_length) + unsolicited_data_length -= + bl->immediate_data_length; + continue; + } + if (unsolicited_data_length > 0) { + if ((offset + mdsl) >= cmd->se_cmd.data_length) { + unsolicited_data_length -= + (cmd->se_cmd.data_length - offset); + offset += (cmd->se_cmd.data_length - offset); + continue; + } + if ((offset + mdsl) + >= 
conn->sess->sess_ops->FirstBurstLength) { + unsolicited_data_length -= + (conn->sess->sess_ops->FirstBurstLength - + offset); + offset += (conn->sess->sess_ops->FirstBurstLength - + offset); + burstlength = 0; + *seq_count += 1; + continue; + } + + offset += mdsl; + unsolicited_data_length -= mdsl; + continue; + } + if ((offset + mdsl) >= cmd->se_cmd.data_length) { + offset += (cmd->se_cmd.data_length - offset); + continue; + } + if ((burstlength + mdsl) >= + conn->sess->sess_ops->MaxBurstLength) { + offset += (conn->sess->sess_ops->MaxBurstLength - + burstlength); + burstlength = 0; + *seq_count += 1; + continue; + } + + burstlength += mdsl; + offset += mdsl; + } +} + + +/* + * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No + * or DataPDUInOrder=No. + */ +static int iscsit_do_build_pdu_and_seq_lists( + struct iscsit_cmd *cmd, + struct iscsi_build_list *bl) +{ + int check_immediate = 0, datapduinorder, datasequenceinorder; + u32 burstlength = 0, offset = 0, i = 0, mdsl; + u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_pdu *pdu = cmd->pdu_list; + struct iscsi_seq *seq = cmd->seq_list; + + if (cmd->se_cmd.data_direction == DMA_TO_DEVICE) + mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength; + else + mdsl = cmd->conn->conn_ops->MaxRecvDataSegmentLength; + + datapduinorder = conn->sess->sess_ops->DataPDUInOrder; + datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder; + + if ((bl->type == PDULIST_IMMEDIATE) || + (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) + check_immediate = 1; + + if ((bl->type == PDULIST_UNSOLICITED) || + (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) + unsolicited_data_length = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); + + while (offset < cmd->se_cmd.data_length) { + pdu_count++; + if (!datapduinorder) { + pdu[i].offset = offset; + pdu[i].seq_no = seq_no; + } + if (!datasequenceinorder && (pdu_count == 1)) { + seq[seq_no].pdu_start = i; + seq[seq_no].seq_no = seq_no; + seq[seq_no].offset = offset; + seq[seq_no].orig_offset = offset; + } + + if (check_immediate) { + check_immediate = 0; + if (!datapduinorder) { + pdu[i].type = PDUTYPE_IMMEDIATE; + pdu[i++].length = bl->immediate_data_length; + } + if (!datasequenceinorder) { + seq[seq_no].type = SEQTYPE_IMMEDIATE; + seq[seq_no].pdu_count = 1; + seq[seq_no].xfer_len = + bl->immediate_data_length; + } + offset += bl->immediate_data_length; + pdu_count = 0; + seq_no++; + if (unsolicited_data_length) + unsolicited_data_length -= + bl->immediate_data_length; + continue; + } + if (unsolicited_data_length > 0) { + if ((offset + mdsl) >= cmd->se_cmd.data_length) { + if (!datapduinorder) { + pdu[i].type = PDUTYPE_UNSOLICITED; + pdu[i].length = + (cmd->se_cmd.data_length - offset); + } + if (!datasequenceinorder) { + seq[seq_no].type = SEQTYPE_UNSOLICITED; + seq[seq_no].pdu_count = pdu_count; + seq[seq_no].xfer_len = (burstlength + + (cmd->se_cmd.data_length - offset)); + } + unsolicited_data_length -= + (cmd->se_cmd.data_length - offset); + offset += (cmd->se_cmd.data_length - offset); + continue; + } + if ((offset + mdsl) >= + conn->sess->sess_ops->FirstBurstLength) { + if (!datapduinorder) { + pdu[i].type = PDUTYPE_UNSOLICITED; + pdu[i++].length = + (conn->sess->sess_ops->FirstBurstLength - + offset); + } + if (!datasequenceinorder) { + seq[seq_no].type = SEQTYPE_UNSOLICITED; + seq[seq_no].pdu_count = pdu_count; + seq[seq_no].xfer_len = (burstlength + + (conn->sess->sess_ops->FirstBurstLength - 
+ offset)); + } + unsolicited_data_length -= + (conn->sess->sess_ops->FirstBurstLength - + offset); + offset += (conn->sess->sess_ops->FirstBurstLength - + offset); + burstlength = 0; + pdu_count = 0; + seq_no++; + continue; + } + + if (!datapduinorder) { + pdu[i].type = PDUTYPE_UNSOLICITED; + pdu[i++].length = mdsl; + } + burstlength += mdsl; + offset += mdsl; + unsolicited_data_length -= mdsl; + continue; + } + if ((offset + mdsl) >= cmd->se_cmd.data_length) { + if (!datapduinorder) { + pdu[i].type = PDUTYPE_NORMAL; + pdu[i].length = (cmd->se_cmd.data_length - offset); + } + if (!datasequenceinorder) { + seq[seq_no].type = SEQTYPE_NORMAL; + seq[seq_no].pdu_count = pdu_count; + seq[seq_no].xfer_len = (burstlength + + (cmd->se_cmd.data_length - offset)); + } + offset += (cmd->se_cmd.data_length - offset); + continue; + } + if ((burstlength + mdsl) >= + conn->sess->sess_ops->MaxBurstLength) { + if (!datapduinorder) { + pdu[i].type = PDUTYPE_NORMAL; + pdu[i++].length = + (conn->sess->sess_ops->MaxBurstLength - + burstlength); + } + if (!datasequenceinorder) { + seq[seq_no].type = SEQTYPE_NORMAL; + seq[seq_no].pdu_count = pdu_count; + seq[seq_no].xfer_len = (burstlength + + (conn->sess->sess_ops->MaxBurstLength - + burstlength)); + } + offset += (conn->sess->sess_ops->MaxBurstLength - + burstlength); + burstlength = 0; + pdu_count = 0; + seq_no++; + continue; + } + + if (!datapduinorder) { + pdu[i].type = PDUTYPE_NORMAL; + pdu[i++].length = mdsl; + } + burstlength += mdsl; + offset += mdsl; + } + + if (!datasequenceinorder) { + if (bl->data_direction & ISCSI_PDU_WRITE) { + if (bl->randomize & RANDOM_R2T_OFFSETS) { + if (iscsit_randomize_seq_lists(cmd, bl->type) + < 0) + return -1; + } else + iscsit_ordered_seq_lists(cmd, bl->type); + } else if (bl->data_direction & ISCSI_PDU_READ) { + if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) { + if (iscsit_randomize_seq_lists(cmd, bl->type) + < 0) + return -1; + } else + iscsit_ordered_seq_lists(cmd, bl->type); + } + + iscsit_dump_seq_list(cmd); + } + if (!datapduinorder) { + if (bl->data_direction & ISCSI_PDU_WRITE) { + if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) { + if (iscsit_randomize_pdu_lists(cmd, bl->type) + < 0) + return -1; + } else + iscsit_ordered_pdu_lists(cmd, bl->type); + } else if (bl->data_direction & ISCSI_PDU_READ) { + if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) { + if (iscsit_randomize_pdu_lists(cmd, bl->type) + < 0) + return -1; + } else + iscsit_ordered_pdu_lists(cmd, bl->type); + } + + iscsit_dump_pdu_list(cmd); + } + + return 0; +} + +int iscsit_build_pdu_and_seq_lists( + struct iscsit_cmd *cmd, + u32 immediate_data_length) +{ + struct iscsi_build_list bl; + u32 pdu_count = 0, seq_count = 1; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_pdu *pdu = NULL; + struct iscsi_seq *seq = NULL; + + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na; + + /* + * Do nothing if no OOO shenanigans + */ + if (sess->sess_ops->DataSequenceInOrder && + sess->sess_ops->DataPDUInOrder) + return 0; + + if (cmd->data_direction == DMA_NONE) + return 0; + + na = iscsit_tpg_get_node_attrib(sess); + memset(&bl, 0, sizeof(struct iscsi_build_list)); + + if (cmd->data_direction == DMA_FROM_DEVICE) { + bl.data_direction = ISCSI_PDU_READ; + bl.type = PDULIST_NORMAL; + if (na->random_datain_pdu_offsets) + bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS; + if (na->random_datain_seq_offsets) + bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS; + } else { + bl.data_direction = ISCSI_PDU_WRITE; + bl.immediate_data_length = 
immediate_data_length; + if (na->random_r2t_offsets) + bl.randomize |= RANDOM_R2T_OFFSETS; + + if (!cmd->immediate_data && !cmd->unsolicited_data) + bl.type = PDULIST_NORMAL; + else if (cmd->immediate_data && !cmd->unsolicited_data) + bl.type = PDULIST_IMMEDIATE; + else if (!cmd->immediate_data && cmd->unsolicited_data) + bl.type = PDULIST_UNSOLICITED; + else if (cmd->immediate_data && cmd->unsolicited_data) + bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED; + } + + iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count); + + if (!conn->sess->sess_ops->DataSequenceInOrder) { + seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC); + if (!seq) { + pr_err("Unable to allocate struct iscsi_seq list\n"); + return -ENOMEM; + } + cmd->seq_list = seq; + cmd->seq_count = seq_count; + } + + if (!conn->sess->sess_ops->DataPDUInOrder) { + pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC); + if (!pdu) { + pr_err("Unable to allocate struct iscsi_pdu list.\n"); + kfree(seq); + return -ENOMEM; + } + cmd->pdu_list = pdu; + cmd->pdu_count = pdu_count; + } + + return iscsit_do_build_pdu_and_seq_lists(cmd, &bl); +} + +struct iscsi_pdu *iscsit_get_pdu_holder( + struct iscsit_cmd *cmd, + u32 offset, + u32 length) +{ + u32 i; + struct iscsi_pdu *pdu = NULL; + + if (!cmd->pdu_list) { + pr_err("struct iscsit_cmd->pdu_list is NULL!\n"); + return NULL; + } + + pdu = &cmd->pdu_list[0]; + + for (i = 0; i < cmd->pdu_count; i++) + if ((pdu[i].offset == offset) && (pdu[i].length == length)) + return &pdu[i]; + + pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:" + " %u, Length: %u\n", cmd->init_task_tag, offset, length); + return NULL; +} + +struct iscsi_pdu *iscsit_get_pdu_holder_for_seq( + struct iscsit_cmd *cmd, + struct iscsi_seq *seq) +{ + u32 i; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_pdu *pdu = NULL; + + if (!cmd->pdu_list) { + pr_err("struct iscsit_cmd->pdu_list is NULL!\n"); + return NULL; + } + + if (conn->sess->sess_ops->DataSequenceInOrder) { +redo: + pdu = &cmd->pdu_list[cmd->pdu_start]; + + for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) { + pr_debug("pdu[i].seq_no: %d, pdu[i].pdu" + "_send_order: %d, pdu[i].offset: %d," + " pdu[i].length: %d\n", pdu[i].seq_no, + pdu[i].pdu_send_order, pdu[i].offset, + pdu[i].length); + + if (pdu[i].pdu_send_order == cmd->pdu_send_order) { + cmd->pdu_send_order++; + return &pdu[i]; + } + } + + cmd->pdu_start += cmd->pdu_send_order; + cmd->pdu_send_order = 0; + cmd->seq_no++; + + if (cmd->pdu_start < cmd->pdu_count) + goto redo; + + pr_err("Command ITT: 0x%08x unable to locate" + " struct iscsi_pdu for cmd->pdu_send_order: %u.\n", + cmd->init_task_tag, cmd->pdu_send_order); + return NULL; + } else { + if (!seq) { + pr_err("struct iscsi_seq is NULL!\n"); + return NULL; + } + + pr_debug("seq->pdu_start: %d, seq->pdu_count: %d," + " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count, + seq->seq_no); + + pdu = &cmd->pdu_list[seq->pdu_start]; + + if (seq->pdu_send_order == seq->pdu_count) { + pr_err("Command ITT: 0x%08x seq->pdu_send" + "_order: %u equals seq->pdu_count: %u\n", + cmd->init_task_tag, seq->pdu_send_order, + seq->pdu_count); + return NULL; + } + + for (i = 0; i < seq->pdu_count; i++) { + if (pdu[i].pdu_send_order == seq->pdu_send_order) { + seq->pdu_send_order++; + return &pdu[i]; + } + } + + pr_err("Command ITT: 0x%08x unable to locate iscsi" + "_pdu_t for seq->pdu_send_order: %u.\n", + cmd->init_task_tag, seq->pdu_send_order); + return NULL; + } + + return NULL; +} + +struct iscsi_seq *iscsit_get_seq_holder( + 
struct iscsit_cmd *cmd, + u32 offset, + u32 length) +{ + u32 i; + + if (!cmd->seq_list) { + pr_err("struct iscsit_cmd->seq_list is NULL!\n"); + return NULL; + } + + for (i = 0; i < cmd->seq_count; i++) { + pr_debug("seq_list[i].orig_offset: %d, seq_list[i]." + "xfer_len: %d, seq_list[i].seq_no %u\n", + cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len, + cmd->seq_list[i].seq_no); + + if ((cmd->seq_list[i].orig_offset + + cmd->seq_list[i].xfer_len) >= + (offset + length)) + return &cmd->seq_list[i]; + } + + pr_err("Unable to locate Sequence holder for ITT: 0x%08x," + " Offset: %u, Length: %u\n", cmd->init_task_tag, offset, + length); + return NULL; +} diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h new file mode 100644 index 0000000000..288298f9f1 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_SEQ_AND_PDU_LIST_H +#define ISCSI_SEQ_AND_PDU_LIST_H + +#include <linux/types.h> +#include <linux/cache.h> + +/* struct iscsi_pdu->status */ +#define DATAOUT_PDU_SENT 1 + +/* struct iscsi_seq->type */ +#define SEQTYPE_IMMEDIATE 1 +#define SEQTYPE_UNSOLICITED 2 +#define SEQTYPE_NORMAL 3 + +/* struct iscsi_seq->status */ +#define DATAOUT_SEQUENCE_GOT_R2T 1 +#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2 +#define DATAOUT_SEQUENCE_COMPLETE 3 + +/* iscsi_determine_counts_for_list() type */ +#define PDULIST_NORMAL 1 +#define PDULIST_IMMEDIATE 2 +#define PDULIST_UNSOLICITED 3 +#define PDULIST_IMMEDIATE_AND_UNSOLICITED 4 + +/* struct iscsi_pdu->type */ +#define PDUTYPE_IMMEDIATE 1 +#define PDUTYPE_UNSOLICITED 2 +#define PDUTYPE_NORMAL 3 + +/* struct iscsi_pdu->status */ +#define ISCSI_PDU_NOT_RECEIVED 0 +#define ISCSI_PDU_RECEIVED_OK 1 +#define ISCSI_PDU_CRC_FAILED 2 +#define ISCSI_PDU_TIMED_OUT 3 + +/* struct iscsi_build_list->randomize */ +#define RANDOM_DATAIN_PDU_OFFSETS 0x01 +#define RANDOM_DATAIN_SEQ_OFFSETS 0x02 +#define RANDOM_DATAOUT_PDU_OFFSETS 0x04 +#define RANDOM_R2T_OFFSETS 0x08 + +/* struct iscsi_build_list->data_direction */ +#define ISCSI_PDU_READ 0x01 +#define ISCSI_PDU_WRITE 0x02 + +struct iscsi_build_list { + int data_direction; + int randomize; + int type; + int immediate_data_length; +}; + +struct iscsi_pdu { + int status; + int type; + u8 flags; + u32 data_sn; + u32 length; + u32 offset; + u32 pdu_send_order; + u32 seq_no; +} ____cacheline_aligned; + +struct iscsi_seq { + int sent; + int status; + int type; + u32 data_sn; + u32 first_datasn; + u32 last_datasn; + u32 next_burst_len; + u32 pdu_start; + u32 pdu_count; + u32 offset; + u32 orig_offset; + u32 pdu_send_order; + u32 r2t_sn; + u32 seq_send_order; + u32 seq_no; + u32 xfer_len; +} ____cacheline_aligned; + +struct iscsit_cmd; + +extern int iscsit_build_pdu_and_seq_lists(struct iscsit_cmd *, u32); +extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsit_cmd *, u32, u32); +extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsit_cmd *, struct iscsi_seq *); +extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsit_cmd *, u32, u32); + +#endif /* ISCSI_SEQ_AND_PDU_LIST_H */ diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c new file mode 100644 index 0000000000..367c6468b8 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -0,0 +1,798 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Modern ConfigFS group 
context specific iSCSI statistics based on original + * iscsi_target_mib.c code + * + * Copyright (c) 2011-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/configfs.h> +#include <linux/export.h> +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_parameters.h" +#include "iscsi_target_device.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include <target/iscsi/iscsi_target_stat.h> + +#ifndef INITIAL_JIFFIES +#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) +#endif + +/* Instance Attributes Table */ +#define ISCSI_INST_NUM_NODES 1 +#define ISCSI_INST_DESCR "Storage Engine Target" +#define ISCSI_DISCONTINUITY_TIME 0 + +#define ISCSI_NODE_INDEX 1 + +#define ISPRINT(a) ((a >= ' ') && (a <= '~')) + +/**************************************************************************** + * iSCSI MIB Tables + ****************************************************************************/ +/* + * Instance Attributes Table + */ +static struct iscsi_tiqn *iscsi_instance_tiqn(struct config_item *item) +{ + struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item), + struct iscsi_wwn_stat_grps, iscsi_instance_group); + return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); +} + +static ssize_t iscsi_stat_instance_inst_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_instance_tiqn(item)->tiqn_index); +} + +static ssize_t iscsi_stat_instance_min_ver_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); +} + +static ssize_t iscsi_stat_instance_max_ver_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); +} + +static ssize_t iscsi_stat_instance_portals_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_instance_tiqn(item)->tiqn_num_tpg_nps); +} + +static ssize_t iscsi_stat_instance_nodes_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES); +} + +static ssize_t iscsi_stat_instance_sessions_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_instance_tiqn(item)->tiqn_nsessions); +} + +static ssize_t iscsi_stat_instance_fail_sess_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item); + struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; + u32 sess_err_count; + + spin_lock_bh(&sess_err->lock); + sess_err_count = (sess_err->digest_errors + + sess_err->cxn_timeout_errors + + sess_err->pdu_format_errors); + spin_unlock_bh(&sess_err->lock); + + return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count); +} + +static ssize_t iscsi_stat_instance_fail_type_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item); + struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; + + return snprintf(page, PAGE_SIZE, "%u\n", + sess_err->last_sess_failure_type); +} + +static ssize_t iscsi_stat_instance_fail_rem_name_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item); + struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; + + return snprintf(page, PAGE_SIZE, "%s\n", + 
sess_err->last_sess_fail_rem_name[0] ? + sess_err->last_sess_fail_rem_name : NONE); +} + +static ssize_t iscsi_stat_instance_disc_time_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME); +} + +static ssize_t iscsi_stat_instance_description_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR); +} + +static ssize_t iscsi_stat_instance_vendor_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n"); +} + +static ssize_t iscsi_stat_instance_version_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION); +} + +CONFIGFS_ATTR_RO(iscsi_stat_instance_, inst); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, min_ver); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, max_ver); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, portals); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, nodes); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, sessions); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_sess); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_type); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_rem_name); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, disc_time); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, description); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, vendor); +CONFIGFS_ATTR_RO(iscsi_stat_instance_, version); + +static struct configfs_attribute *iscsi_stat_instance_attrs[] = { + &iscsi_stat_instance_attr_inst, + &iscsi_stat_instance_attr_min_ver, + &iscsi_stat_instance_attr_max_ver, + &iscsi_stat_instance_attr_portals, + &iscsi_stat_instance_attr_nodes, + &iscsi_stat_instance_attr_sessions, + &iscsi_stat_instance_attr_fail_sess, + &iscsi_stat_instance_attr_fail_type, + &iscsi_stat_instance_attr_fail_rem_name, + &iscsi_stat_instance_attr_disc_time, + &iscsi_stat_instance_attr_description, + &iscsi_stat_instance_attr_vendor, + &iscsi_stat_instance_attr_version, + NULL, +}; + +const struct config_item_type iscsi_stat_instance_cit = { + .ct_attrs = iscsi_stat_instance_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Instance Session Failure Stats Table + */ +static struct iscsi_tiqn *iscsi_sess_err_tiqn(struct config_item *item) +{ + struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item), + struct iscsi_wwn_stat_grps, iscsi_sess_err_group); + return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); +} + +static ssize_t iscsi_stat_sess_err_inst_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_sess_err_tiqn(item)->tiqn_index); +} + +static ssize_t iscsi_stat_sess_err_digest_errors_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item); + struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; + + return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors); +} + +static ssize_t iscsi_stat_sess_err_cxn_errors_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item); + struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; + + return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors); +} + +static ssize_t iscsi_stat_sess_err_format_errors_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item); + struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; + + return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors); +} + +CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, inst); 
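+/*
+ * For reference, CONFIGFS_ATTR_RO(_pfx, _name) from <linux/configfs.h>
+ * expands to roughly the following, which is how each _show() routine
+ * above becomes a read-only configfs file:
+ *
+ * static struct configfs_attribute _pfx##attr_##_name = {
+ * .ca_name = __stringify(_name),
+ * .ca_mode = S_IRUGO,
+ * .ca_owner = THIS_MODULE,
+ * .show = _pfx##_name##_show,
+ * };
+ */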
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, digest_errors); +CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, cxn_errors); +CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, format_errors); + +static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = { + &iscsi_stat_sess_err_attr_inst, + &iscsi_stat_sess_err_attr_digest_errors, + &iscsi_stat_sess_err_attr_cxn_errors, + &iscsi_stat_sess_err_attr_format_errors, + NULL, +}; + +const struct config_item_type iscsi_stat_sess_err_cit = { + .ct_attrs = iscsi_stat_sess_err_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Target Attributes Table + */ +static struct iscsi_tiqn *iscsi_tgt_attr_tiqn(struct config_item *item) +{ + struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item), + struct iscsi_wwn_stat_grps, iscsi_tgt_attr_group); + return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); +} + +static ssize_t iscsi_stat_tgt_attr_inst_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_tgt_attr_tiqn(item)->tiqn_index); +} + +static ssize_t iscsi_stat_tgt_attr_indx_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); +} + +static ssize_t iscsi_stat_tgt_attr_login_fails_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + u32 fail_count; + + spin_lock(&lstat->lock); + fail_count = (lstat->redirects + lstat->authorize_fails + + lstat->authenticate_fails + lstat->negotiate_fails + + lstat->other_fails); + spin_unlock(&lstat->lock); + + return snprintf(page, PAGE_SIZE, "%u\n", fail_count); +} + +static ssize_t iscsi_stat_tgt_attr_last_fail_time_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + u32 last_fail_time; + + spin_lock(&lstat->lock); + last_fail_time = lstat->last_fail_time ? + (u32)(((u32)lstat->last_fail_time - + INITIAL_JIFFIES) * 100 / HZ) : 0; + spin_unlock(&lstat->lock); + + return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time); +} + +static ssize_t iscsi_stat_tgt_attr_last_fail_type_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + u32 last_fail_type; + + spin_lock(&lstat->lock); + last_fail_type = lstat->last_fail_type; + spin_unlock(&lstat->lock); + + return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type); +} + +static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + unsigned char buf[ISCSI_IQN_LEN]; + + spin_lock(&lstat->lock); + snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ? 
+ lstat->last_intr_fail_name : NONE); + spin_unlock(&lstat->lock); + + return snprintf(page, PAGE_SIZE, "%s\n", buf); +} + +static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_type_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + int ret; + + spin_lock(&lstat->lock); + if (lstat->last_intr_fail_ip_family == AF_INET6) + ret = snprintf(page, PAGE_SIZE, "ipv6\n"); + else + ret = snprintf(page, PAGE_SIZE, "ipv4\n"); + spin_unlock(&lstat->lock); + + return ret; +} + +static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + int ret; + + spin_lock(&lstat->lock); + ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr); + spin_unlock(&lstat->lock); + + return ret; +} + +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, inst); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, indx); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, login_fails); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_time); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_type); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_name); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr_type); +CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr); + +static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = { + &iscsi_stat_tgt_attr_attr_inst, + &iscsi_stat_tgt_attr_attr_indx, + &iscsi_stat_tgt_attr_attr_login_fails, + &iscsi_stat_tgt_attr_attr_last_fail_time, + &iscsi_stat_tgt_attr_attr_last_fail_type, + &iscsi_stat_tgt_attr_attr_fail_intr_name, + &iscsi_stat_tgt_attr_attr_fail_intr_addr_type, + &iscsi_stat_tgt_attr_attr_fail_intr_addr, + NULL, +}; + +const struct config_item_type iscsi_stat_tgt_attr_cit = { + .ct_attrs = iscsi_stat_tgt_attr_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Target Login Stats Table + */ +static struct iscsi_tiqn *iscsi_login_stat_tiqn(struct config_item *item) +{ + struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item), + struct iscsi_wwn_stat_grps, iscsi_login_stats_group); + return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); +} + +static ssize_t iscsi_stat_login_inst_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_login_stat_tiqn(item)->tiqn_index); +} + +static ssize_t iscsi_stat_login_indx_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); +} + +static ssize_t iscsi_stat_login_accepts_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + ssize_t ret; + + spin_lock(&lstat->lock); + ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts); + spin_unlock(&lstat->lock); + + return ret; +} + +static ssize_t iscsi_stat_login_other_fails_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + ssize_t ret; + + spin_lock(&lstat->lock); + ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails); + spin_unlock(&lstat->lock); + + return ret; +} + +static ssize_t iscsi_stat_login_redirects_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + ssize_t ret; + + spin_lock(&lstat->lock); 
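+ /* Snapshot the counter while holding lstat->lock so the value
+ * reported stays consistent with updates from the login path. */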
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects); + spin_unlock(&lstat->lock); + + return ret; +} + +static ssize_t iscsi_stat_login_authorize_fails_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + ssize_t ret; + + spin_lock(&lstat->lock); + ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails); + spin_unlock(&lstat->lock); + + return ret; +} + +static ssize_t iscsi_stat_login_authenticate_fails_show( + struct config_item *item, char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + ssize_t ret; + + spin_lock(&lstat->lock); + ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails); + spin_unlock(&lstat->lock); + + return ret; +} + +static ssize_t iscsi_stat_login_negotiate_fails_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item); + struct iscsi_login_stats *lstat = &tiqn->login_stats; + ssize_t ret; + + spin_lock(&lstat->lock); + ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails); + spin_unlock(&lstat->lock); + + return ret; +} + +CONFIGFS_ATTR_RO(iscsi_stat_login_, inst); +CONFIGFS_ATTR_RO(iscsi_stat_login_, indx); +CONFIGFS_ATTR_RO(iscsi_stat_login_, accepts); +CONFIGFS_ATTR_RO(iscsi_stat_login_, other_fails); +CONFIGFS_ATTR_RO(iscsi_stat_login_, redirects); +CONFIGFS_ATTR_RO(iscsi_stat_login_, authorize_fails); +CONFIGFS_ATTR_RO(iscsi_stat_login_, authenticate_fails); +CONFIGFS_ATTR_RO(iscsi_stat_login_, negotiate_fails); + +static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = { + &iscsi_stat_login_attr_inst, + &iscsi_stat_login_attr_indx, + &iscsi_stat_login_attr_accepts, + &iscsi_stat_login_attr_other_fails, + &iscsi_stat_login_attr_redirects, + &iscsi_stat_login_attr_authorize_fails, + &iscsi_stat_login_attr_authenticate_fails, + &iscsi_stat_login_attr_negotiate_fails, + NULL, +}; + +const struct config_item_type iscsi_stat_login_cit = { + .ct_attrs = iscsi_stat_login_stats_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Target Logout Stats Table + */ +static struct iscsi_tiqn *iscsi_logout_stat_tiqn(struct config_item *item) +{ + struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item), + struct iscsi_wwn_stat_grps, iscsi_logout_stats_group); + return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); +} + +static ssize_t iscsi_stat_logout_inst_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + iscsi_logout_stat_tiqn(item)->tiqn_index); +} + +static ssize_t iscsi_stat_logout_indx_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); +} + +static ssize_t iscsi_stat_logout_normal_logouts_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item); + struct iscsi_logout_stats *lstats = &tiqn->logout_stats; + + return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts); +} + +static ssize_t iscsi_stat_logout_abnormal_logouts_show(struct config_item *item, + char *page) +{ + struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item); + struct iscsi_logout_stats *lstats = &tiqn->logout_stats; + + return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts); +} + +CONFIGFS_ATTR_RO(iscsi_stat_logout_, inst); +CONFIGFS_ATTR_RO(iscsi_stat_logout_, indx); +CONFIGFS_ATTR_RO(iscsi_stat_logout_, normal_logouts); 
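+/*
+ * Like the tables above, these counters appear as read-only configfs
+ * files; e.g. (illustrative path, with the stat groups registered under
+ * the target IQN's fabric_statistics group):
+ *
+ * cat .../target/iscsi/<iqn>/fabric_statistics/iscsi_logout_stats/normal_logouts
+ */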
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, abnormal_logouts); + +static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = { + &iscsi_stat_logout_attr_inst, + &iscsi_stat_logout_attr_indx, + &iscsi_stat_logout_attr_normal_logouts, + &iscsi_stat_logout_attr_abnormal_logouts, + NULL, +}; + +const struct config_item_type iscsi_stat_logout_cit = { + .ct_attrs = iscsi_stat_logout_stats_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Session Stats Table + */ +static struct iscsi_node_acl *iscsi_stat_nacl(struct config_item *item) +{ + struct iscsi_node_stat_grps *igrps = container_of(to_config_group(item), + struct iscsi_node_stat_grps, iscsi_sess_stats_group); + return container_of(igrps, struct iscsi_node_acl, node_stat_grps); +} + +static ssize_t iscsi_stat_sess_inst_show(struct config_item *item, char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn; + struct iscsi_tiqn *tiqn = container_of(wwn, + struct iscsi_tiqn, tiqn_wwn); + + return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); +} + +static ssize_t iscsi_stat_sess_node_show(struct config_item *item, char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%u\n", + sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_indx_show(struct config_item *item, char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%u\n", + sess->session_index); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_cmd_pdus_show(struct config_item *item, + char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->cmd_pdus)); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_rsp_pdus_show(struct config_item *item, + char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rsp_pdus)); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_txdata_octs_show(struct config_item *item, + char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session 
*sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->tx_data_octets)); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_rxdata_octs_show(struct config_item *item, + char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rx_data_octets)); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_conn_digest_errors_show(struct config_item *item, + char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_digest_errors)); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +static ssize_t iscsi_stat_sess_conn_timeout_errors_show( + struct config_item *item, char *page) +{ + struct iscsi_node_acl *acl = iscsi_stat_nacl(item); + struct se_node_acl *se_nacl = &acl->se_node_acl; + struct iscsit_session *sess; + struct se_session *se_sess; + ssize_t ret = 0; + + spin_lock_bh(&se_nacl->nacl_sess_lock); + se_sess = se_nacl->nacl_sess; + if (se_sess) { + sess = se_sess->fabric_sess_ptr; + if (sess) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_timeout_errors)); + } + spin_unlock_bh(&se_nacl->nacl_sess_lock); + + return ret; +} + +CONFIGFS_ATTR_RO(iscsi_stat_sess_, inst); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, node); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, indx); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, cmd_pdus); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, rsp_pdus); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, txdata_octs); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, rxdata_octs); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_digest_errors); +CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_timeout_errors); + +static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = { + &iscsi_stat_sess_attr_inst, + &iscsi_stat_sess_attr_node, + &iscsi_stat_sess_attr_indx, + &iscsi_stat_sess_attr_cmd_pdus, + &iscsi_stat_sess_attr_rsp_pdus, + &iscsi_stat_sess_attr_txdata_octs, + &iscsi_stat_sess_attr_rxdata_octs, + &iscsi_stat_sess_attr_conn_digest_errors, + &iscsi_stat_sess_attr_conn_timeout_errors, + NULL, +}; + +const struct config_item_type iscsi_stat_sess_cit = { + .ct_attrs = iscsi_stat_sess_stats_attrs, + .ct_owner = THIS_MODULE, +}; diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c new file mode 100644 index 0000000000..afc801f255 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains the iSCSI Target specific Task Management functions. + * + * (c) Copyright 2007-2013 Datera, Inc. 
+ * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <asm/unaligned.h> +#include <scsi/scsi_proto.h> +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_seq_pdu_list.h" +#include "iscsi_target_datain_values.h" +#include "iscsi_target_device.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_erl2.h" +#include "iscsi_target_tmr.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" + +u8 iscsit_tmr_abort_task( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsit_cmd *ref_cmd; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_tmr_req *tmr_req = cmd->tmr_req; + struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; + struct iscsi_tm *hdr = (struct iscsi_tm *) buf; + + ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt); + if (!ref_cmd) { + pr_err("Unable to locate RefTaskTag: 0x%08x on CID:" + " %hu.\n", hdr->rtt, conn->cid); + return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) && + iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ? + ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK; + } + if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) { + pr_err("RefCmdSN 0x%08x does not equal" + " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n", + hdr->refcmdsn, ref_cmd->cmd_sn); + return ISCSI_TMF_RSP_REJECTED; + } + + se_tmr->ref_task_tag = (__force u32)hdr->rtt; + tmr_req->ref_cmd = ref_cmd; + tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn); + + return ISCSI_TMF_RSP_COMPLETE; +} + +/* + * Called from iscsit_handle_task_mgt_cmd(). + */ +int iscsit_tmr_task_warm_reset( + struct iscsit_conn *conn, + struct iscsi_tmr_req *tmr_req, + unsigned char *buf) +{ + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + if (!na->tmr_warm_reset) { + pr_err("TMR Opcode TARGET_WARM_RESET authorization" + " failed for Initiator Node: %s\n", + sess->se_sess->se_node_acl->initiatorname); + return -1; + } + /* + * Do the real work in transport_generic_do_tmr(). + */ + return 0; +} + +int iscsit_tmr_task_cold_reset( + struct iscsit_conn *conn, + struct iscsi_tmr_req *tmr_req, + unsigned char *buf) +{ + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + if (!na->tmr_cold_reset) { + pr_err("TMR Opcode TARGET_COLD_RESET authorization" + " failed for Initiator Node: %s\n", + sess->se_sess->se_node_acl->initiatorname); + return -1; + } + /* + * Do the real work in transport_generic_do_tmr(). 
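+ * (This hook only enforces the per-node tmr_cold_reset authorization
+ * attribute; the reset itself is driven by the target core.)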
+ */ + return 0; +} + +u8 iscsit_tmr_task_reassign( + struct iscsit_cmd *cmd, + unsigned char *buf) +{ + struct iscsit_cmd *ref_cmd = NULL; + struct iscsit_conn *conn = cmd->conn; + struct iscsi_conn_recovery *cr = NULL; + struct iscsi_tmr_req *tmr_req = cmd->tmr_req; + struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; + struct iscsi_tm *hdr = (struct iscsi_tm *) buf; + u64 ret, ref_lun; + + pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x," + " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n", + hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid); + + if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) { + pr_err("TMR TASK_REASSIGN not supported in ERL<2," + " ignoring request.\n"); + return ISCSI_TMF_RSP_NOT_SUPPORTED; + } + + ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt); + if (ret == -2) { + pr_err("Command ITT: 0x%08x is still alligent to CID:" + " %hu\n", ref_cmd->init_task_tag, cr->cid); + return ISCSI_TMF_RSP_TASK_ALLEGIANT; + } else if (ret == -1) { + pr_err("Unable to locate RefTaskTag: 0x%08x in" + " connection recovery command list.\n", hdr->rtt); + return ISCSI_TMF_RSP_NO_TASK; + } + /* + * Temporary check to prevent connection recovery for + * connections with a differing Max*DataSegmentLength. + */ + if (cr->maxrecvdatasegmentlength != + conn->conn_ops->MaxRecvDataSegmentLength) { + pr_err("Unable to perform connection recovery for" + " differing MaxRecvDataSegmentLength, rejecting" + " TMR TASK_REASSIGN.\n"); + return ISCSI_TMF_RSP_REJECTED; + } + if (cr->maxxmitdatasegmentlength != + conn->conn_ops->MaxXmitDataSegmentLength) { + pr_err("Unable to perform connection recovery for" + " differing MaxXmitDataSegmentLength, rejecting" + " TMR TASK_REASSIGN.\n"); + return ISCSI_TMF_RSP_REJECTED; + } + + ref_lun = scsilun_to_int(&hdr->lun); + if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) { + pr_err("Unable to perform connection recovery for" + " differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n", + ref_lun, ref_cmd->se_cmd.orig_fe_lun); + return ISCSI_TMF_RSP_REJECTED; + } + + se_tmr->ref_task_tag = (__force u32)hdr->rtt; + tmr_req->ref_cmd = ref_cmd; + tmr_req->exp_data_sn = be32_to_cpu(hdr->exp_datasn); + tmr_req->conn_recovery = cr; + tmr_req->task_reassign = 1; + /* + * Command can now be reassigned to a new connection. + * The task management response must be sent before the + * reassignment actually happens. See iscsi_tmr_post_handler(). + */ + return ISCSI_TMF_RSP_COMPLETE; +} + +static void iscsit_task_reassign_remove_cmd( + struct iscsit_cmd *cmd, + struct iscsi_conn_recovery *cr, + struct iscsit_session *sess) +{ + int ret; + + spin_lock(&cr->conn_recovery_cmd_lock); + ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess); + spin_unlock(&cr->conn_recovery_cmd_lock); + if (!ret) { + pr_debug("iSCSI connection recovery successful for CID:" + " %hu on SID: %u\n", cr->cid, sess->sid); + iscsit_remove_active_connection_recovery_entry(cr, sess); + } +} + +static int iscsit_task_reassign_complete_nop_out( + struct iscsi_tmr_req *tmr_req, + struct iscsit_conn *conn) +{ + struct iscsit_cmd *cmd = tmr_req->ref_cmd; + struct iscsi_conn_recovery *cr; + + if (!cmd->cr) { + pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x" + " is NULL!\n", cmd->init_task_tag); + return -1; + } + cr = cmd->cr; + + /* + * Reset the StatSN so a new one for this commands new connection + * will be assigned. + * Reset the ExpStatSN as well so we may receive Status SNACKs. 
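+ * (StatSN is a per-connection counter, so values assigned on the failed
+ * connection are meaningless on the new one.)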
+ */ + cmd->stat_sn = cmd->exp_stat_sn = 0; + + iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); + + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + cmd->i_state = ISTATE_SEND_NOPIN; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + return 0; +} + +static int iscsit_task_reassign_complete_write( + struct iscsit_cmd *cmd, + struct iscsi_tmr_req *tmr_req) +{ + int no_build_r2ts = 0; + u32 length = 0, offset = 0; + struct iscsit_conn *conn = cmd->conn; + struct se_cmd *se_cmd = &cmd->se_cmd; + /* + * The Initiator must not send a R2T SNACK with a Begrun less than + * the TMR TASK_REASSIGN's ExpDataSN. + */ + if (!tmr_req->exp_data_sn) { + cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK; + cmd->acked_data_sn = 0; + } else { + cmd->cmd_flags |= ICF_GOT_DATACK_SNACK; + cmd->acked_data_sn = (tmr_req->exp_data_sn - 1); + } + + /* + * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the + * Initiator is expecting. The Target controls all WRITE operations + * so if we have received all DataOUT we can safety ignore Initiator. + */ + if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { + if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) { + pr_debug("WRITE ITT: 0x%08x: t_state: %d" + " never sent to transport\n", + cmd->init_task_tag, cmd->se_cmd.t_state); + target_execute_cmd(se_cmd); + return 0; + } + + cmd->i_state = ISTATE_SEND_STATUS; + iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + return 0; + } + + /* + * Special case to deal with DataSequenceInOrder=No and Non-Immeidate + * Unsolicited DataOut. + */ + if (cmd->unsolicited_data) { + cmd->unsolicited_data = 0; + + offset = cmd->next_burst_len = cmd->write_data_done; + + if ((conn->sess->sess_ops->FirstBurstLength - offset) >= + cmd->se_cmd.data_length) { + no_build_r2ts = 1; + length = (cmd->se_cmd.data_length - offset); + } else + length = (conn->sess->sess_ops->FirstBurstLength - offset); + + spin_lock_bh(&cmd->r2t_lock); + if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) { + spin_unlock_bh(&cmd->r2t_lock); + return -1; + } + cmd->outstanding_r2ts++; + spin_unlock_bh(&cmd->r2t_lock); + + if (no_build_r2ts) + return 0; + } + /* + * iscsit_build_r2ts_for_cmd() can handle the rest from here. + */ + return conn->conn_transport->iscsit_get_dataout(conn, cmd, true); +} + +static int iscsit_task_reassign_complete_read( + struct iscsit_cmd *cmd, + struct iscsi_tmr_req *tmr_req) +{ + struct iscsit_conn *conn = cmd->conn; + struct iscsi_datain_req *dr; + struct se_cmd *se_cmd = &cmd->se_cmd; + /* + * The Initiator must not send a Data SNACK with a BegRun less than + * the TMR TASK_REASSIGN's ExpDataSN. + */ + if (!tmr_req->exp_data_sn) { + cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK; + cmd->acked_data_sn = 0; + } else { + cmd->cmd_flags |= ICF_GOT_DATACK_SNACK; + cmd->acked_data_sn = (tmr_req->exp_data_sn - 1); + } + + if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) { + pr_debug("READ ITT: 0x%08x: t_state: %d never sent to" + " transport\n", cmd->init_task_tag, + cmd->se_cmd.t_state); + transport_handle_cdb_direct(se_cmd); + return 0; + } + + if (!(se_cmd->transport_state & CMD_T_COMPLETE)) { + pr_err("READ ITT: 0x%08x: t_state: %d, never returned" + " from transport\n", cmd->init_task_tag, + cmd->se_cmd.t_state); + return -1; + } + + dr = iscsit_allocate_datain_req(); + if (!dr) + return -1; + /* + * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the + * Initiator is expecting. 
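+ * The recovery datain request below seeds its BegRun with this value, so
+ * the DataIN stream resumes exactly where the initiator left off.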
+ */
+ dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
+ dr->runlength = 0;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_CONNECTION_RECOVERY;
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_none(
+ struct iscsit_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsit_conn *conn = cmd->conn;
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_scsi_cmnd(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsit_conn *conn)
+{
+ struct iscsit_cmd *cmd = tmr_req->ref_cmd;
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+ * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_task_reassign_complete_write(cmd, tmr_req);
+ case DMA_FROM_DEVICE:
+ return iscsit_task_reassign_complete_read(cmd, tmr_req);
+ case DMA_NONE:
+ return iscsit_task_reassign_complete_none(cmd, tmr_req);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsit_task_reassign_complete(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsit_conn *conn)
+{
+ struct iscsit_cmd *cmd;
+ int ret = 0;
+
+ if (!tmr_req->ref_cmd) {
+ pr_err("TMR Request is missing a RefCmd struct iscsit_cmd.\n");
+ return -1;
+ }
+ cmd = tmr_req->ref_cmd;
+
+ cmd->conn = conn;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_NOOP_OUT:
+ ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
+ break;
+ case ISCSI_OP_SCSI_CMD:
+ ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
+ break;
+ default:
+ pr_err("Illegal iSCSI Opcode 0x%02x during"
+ " command reallegiance\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ if (ret != 0)
+ return ret;
+
+ pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
+ " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Handles special after-the-fact actions related to TMRs.
+ * Right now the only one that it's really needed for is
+ * connection recovery related TASK_REASSIGN.
+ */
+int iscsit_tmr_post_handler(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
+{
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+ if (tmr_req->task_reassign &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ return iscsit_task_reassign_complete(tmr_req, conn);
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsit_tmr_post_handler);
+
+/*
+ * Nothing to do here, but leave it for good measure. 
:-) + */ +static int iscsit_task_reassign_prepare_read( + struct iscsi_tmr_req *tmr_req, + struct iscsit_conn *conn) +{ + return 0; +} + +static void iscsit_task_reassign_prepare_unsolicited_dataout( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + int i, j; + struct iscsi_pdu *pdu = NULL; + struct iscsi_seq *seq = NULL; + + if (conn->sess->sess_ops->DataSequenceInOrder) { + cmd->data_sn = 0; + + if (cmd->immediate_data) + cmd->r2t_offset += (cmd->first_burst_len - + cmd->seq_start_offset); + + if (conn->sess->sess_ops->DataPDUInOrder) { + cmd->write_data_done -= (cmd->immediate_data) ? + (cmd->first_burst_len - + cmd->seq_start_offset) : + cmd->first_burst_len; + cmd->first_burst_len = 0; + return; + } + + for (i = 0; i < cmd->pdu_count; i++) { + pdu = &cmd->pdu_list[i]; + + if (pdu->status != ISCSI_PDU_RECEIVED_OK) + continue; + + if ((pdu->offset >= cmd->seq_start_offset) && + ((pdu->offset + pdu->length) <= + cmd->seq_end_offset)) { + cmd->first_burst_len -= pdu->length; + cmd->write_data_done -= pdu->length; + pdu->status = ISCSI_PDU_NOT_RECEIVED; + } + } + } else { + for (i = 0; i < cmd->seq_count; i++) { + seq = &cmd->seq_list[i]; + + if (seq->type != SEQTYPE_UNSOLICITED) + continue; + + cmd->write_data_done -= + (seq->offset - seq->orig_offset); + cmd->first_burst_len = 0; + seq->data_sn = 0; + seq->offset = seq->orig_offset; + seq->next_burst_len = 0; + seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY; + + if (conn->sess->sess_ops->DataPDUInOrder) + continue; + + for (j = 0; j < seq->pdu_count; j++) { + pdu = &cmd->pdu_list[j+seq->pdu_start]; + + if (pdu->status != ISCSI_PDU_RECEIVED_OK) + continue; + + pdu->status = ISCSI_PDU_NOT_RECEIVED; + } + } + } +} + +static int iscsit_task_reassign_prepare_write( + struct iscsi_tmr_req *tmr_req, + struct iscsit_conn *conn) +{ + struct iscsit_cmd *cmd = tmr_req->ref_cmd; + struct iscsi_pdu *pdu = NULL; + struct iscsi_r2t *r2t = NULL, *r2t_tmp; + int first_incomplete_r2t = 1, i = 0; + + /* + * The command was in the process of receiving Unsolicited DataOUT when + * the connection failed. + */ + if (cmd->unsolicited_data) + iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn); + + /* + * The Initiator is requesting R2Ts starting from zero, skip + * checking acknowledged R2Ts and start checking struct iscsi_r2ts + * greater than zero. + */ + if (!tmr_req->exp_data_sn) + goto drop_unacknowledged_r2ts; + + /* + * We now check that the PDUs in DataOUT sequences below + * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is + * expecting next) have all the DataOUT they require to complete + * the DataOUT sequence. First scan from R2TSN 0 to TMR + * TASK_REASSIGN ExpDataSN-1. + * + * If we have not received all DataOUT in question, we must + * make sure to make the appropriate changes to values in + * struct iscsit_cmd (and elsewhere depending on session parameters) + * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write() + * will resend a new R2T for the DataOUT sequences in question. + */ + spin_lock_bh(&cmd->r2t_lock); + if (list_empty(&cmd->cmd_r2t_list)) { + spin_unlock_bh(&cmd->r2t_lock); + return -1; + } + + list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { + + if (r2t->r2t_sn >= tmr_req->exp_data_sn) + continue; + /* + * Safely ignore Recovery R2Ts and R2Ts that have completed + * DataOUT sequences. 
+		 */
+		if (r2t->seq_complete)
+			continue;
+
+		if (r2t->recovery_r2t)
+			continue;
+
+		/*
+		 * DataSequenceInOrder=Yes:
+		 *
+		 * Taking into account the iSCSI implementation requirement of
+		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, we must take into consideration
+		 * the following:
+		 *
+		 * DataSequenceInOrder=No:
+		 *
+		 * Taking into account that the Initiator controls the (possibly
+		 * random) PDU Order in (possibly random) Sequence Order of
+		 * DataOUT the target requests with R2Ts, we must take into
+		 * consideration the following:
+		 *
+		 * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
+		 *
+		 * While processing non-complete R2T DataOUT sequence requests
+		 * the Target will re-request only the total sequence length
+		 * minus the current received offset.  This is because we must
+		 * assume the initiator will continue sending DataOUT from the
+		 * last PDU before the connection failed.
+		 *
+		 * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
+		 *
+		 * While processing non-complete R2T DataOUT sequence requests
+		 * the Target will re-request the entire DataOUT sequence if
+		 * any single PDU is missing from the sequence.  This is because
+		 * we have no logical method to determine the next PDU offset,
+		 * and we must assume the Initiator will be sending any random
+		 * PDU offset in the current sequence after TASK_REASSIGN
+		 * has completed.
+		 */
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if (!first_incomplete_r2t) {
+				cmd->r2t_offset -= r2t->xfer_len;
+				goto next;
+			}
+
+			if (conn->sess->sess_ops->DataPDUInOrder) {
+				cmd->data_sn = 0;
+				cmd->r2t_offset -= (r2t->xfer_len -
+						cmd->next_burst_len);
+				first_incomplete_r2t = 0;
+				goto next;
+			}
+
+			cmd->data_sn = 0;
+			cmd->r2t_offset -= r2t->xfer_len;
+
+			for (i = 0; i < cmd->pdu_count; i++) {
+				pdu = &cmd->pdu_list[i];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				if ((pdu->offset >= r2t->offset) &&
+				    (pdu->offset < (r2t->offset +
+						r2t->xfer_len))) {
+					cmd->next_burst_len -= pdu->length;
+					cmd->write_data_done -= pdu->length;
+					pdu->status = ISCSI_PDU_NOT_RECEIVED;
+				}
+			}
+
+			first_incomplete_r2t = 0;
+		} else {
+			struct iscsi_seq *seq;
+
+			seq = iscsit_get_seq_holder(cmd, r2t->offset,
+					r2t->xfer_len);
+			if (!seq) {
+				spin_unlock_bh(&cmd->r2t_lock);
+				return -1;
+			}
+
+			cmd->write_data_done -=
+					(seq->offset - seq->orig_offset);
+			seq->data_sn = 0;
+			seq->offset = seq->orig_offset;
+			seq->next_burst_len = 0;
+			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+			cmd->seq_send_order--;
+
+			if (conn->sess->sess_ops->DataPDUInOrder)
+				goto next;
+
+			for (i = 0; i < seq->pdu_count; i++) {
+				pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+
+next:
+		cmd->outstanding_r2ts--;
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	/*
+	 * We now drop all unacknowledged R2Ts, i.e. from the TMR
+	 * TASK_REASSIGN ExpDataSN to the last R2T in the list.  We are also
+	 * careful to check that the Initiator is not requesting R2Ts for
+	 * DataOUT sequences it has already completed.
+	 *
+	 * Free each R2T in question and adjust values in struct iscsit_cmd
+	 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
+	 * the work after the TMR TASK_REASSIGN Response is sent.
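+	 *
+	 * Hypothetical example: with ExpDataSN=2 and R2TSNs 0..3 on the
+	 * list, R2TSNs 2 and 3 are freed here and cmd->r2t_sn is reset
+	 * to 2, so the R2Ts rebuilt by iscsit_build_r2ts_for_cmd() reuse
+	 * exactly the sequence numbers the Initiator still expects.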
+	 */
+drop_unacknowledged_r2ts:
+
+	cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
+	cmd->r2t_sn = tmr_req->exp_data_sn;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
+		/*
+		 * Skip up to the R2T Sequence number provided by the
+		 * iSCSI TASK_REASSIGN TMR
+		 */
+		if (r2t->r2t_sn < tmr_req->exp_data_sn)
+			continue;
+
+		if (r2t->seq_complete) {
+			pr_err("Initiator is requesting R2Ts from"
+				" R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
+				" Length: %u is already complete."
+				" BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
+				tmr_req->exp_data_sn, r2t->r2t_sn,
+				r2t->offset, r2t->xfer_len);
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+
+		if (r2t->recovery_r2t) {
+			iscsit_free_r2t(r2t, cmd);
+			continue;
+		}
+
+		/* DataSequenceInOrder=Yes:
+		 *
+		 * Taking into account the iSCSI implementation requirement of
+		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
+		 * entire transfer length from the command's R2T offset marker.
+		 *
+		 * DataSequenceInOrder=No:
+		 *
+		 * We subtract the difference from struct iscsi_seq between the
+		 * current offset and original offset from cmd->write_data_done
+		 * to account for DataOUT PDUs already received.  Then reset
+		 * the current offset to the original and zero out the current
+		 * burst length, to make sure we re-request the entire DataOUT
+		 * sequence.
+		 */
+		if (conn->sess->sess_ops->DataSequenceInOrder)
+			cmd->r2t_offset -= r2t->xfer_len;
+		else
+			cmd->seq_send_order--;
+
+		cmd->outstanding_r2ts--;
+		iscsit_free_r2t(r2t, cmd);
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+/*
+ * Performs sanity checks on the TMR TASK_REASSIGN ExpDataSN for
+ * a given struct iscsit_cmd.
+ */
+int iscsit_check_task_reassign_expdatasn(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsit_conn *conn)
+{
+	struct iscsit_cmd *ref_cmd = tmr_req->ref_cmd;
+
+	if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
+		return 0;
+
+	if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+		return 0;
+
+	if (ref_cmd->data_direction == DMA_NONE)
+		return 0;
+
+	/*
+	 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
+	 * of DataIN the Initiator is expecting.
+	 *
+	 * Also check that the Initiator is not re-requesting DataIN that has
+	 * already been acknowledged with a DataAck SNACK.
+	 */
+	if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
+		if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
+			pr_err("Received ExpDataSN: 0x%08x for READ"
+				" in TMR TASK_REASSIGN greater than command's"
+				" DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
+				ref_cmd->data_sn);
+			return -1;
+		}
+		if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+		    (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
+			pr_err("Received ExpDataSN: 0x%08x for READ"
+				" in TMR TASK_REASSIGN for previously"
+				" acknowledged DataIN: 0x%08x,"
+				" protocol error\n", tmr_req->exp_data_sn,
+				ref_cmd->acked_data_sn);
+			return -1;
+		}
+		return iscsit_task_reassign_prepare_read(tmr_req, conn);
+	}
+
+	/*
+	 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
+	 * for R2Ts the Initiator is expecting.
+	 *
+	 * Do the magic in iscsit_task_reassign_prepare_write().
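+	 *
+	 * In short, with hypothetical numbers: for a WRITE whose r2t_sn
+	 * is 3, any ExpDataSN from 0 to 3 is accepted and R2T state is
+	 * rewound from that point, while an ExpDataSN of 4 or more is
+	 * rejected as a protocol error, mirroring the DataSN bound
+	 * enforced for READs above.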
+ */ + if (ref_cmd->data_direction == DMA_TO_DEVICE) { + if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) { + pr_err("Received ExpDataSN: 0x%08x for WRITE" + " in TMR TASK_REASSIGN greater than command's" + " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn, + ref_cmd->r2t_sn); + return -1; + } + return iscsit_task_reassign_prepare_write(tmr_req, conn); + } + + pr_err("Unknown iSCSI data_direction: 0x%02x\n", + ref_cmd->data_direction); + + return -1; +} diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h new file mode 100644 index 0000000000..3413d0f596 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_tmr.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_TMR_H +#define ISCSI_TARGET_TMR_H + +#include <linux/types.h> + +struct iscsit_cmd; +struct iscsit_conn; +struct iscsi_tmr_req; + +extern u8 iscsit_tmr_abort_task(struct iscsit_cmd *, unsigned char *); +extern int iscsit_tmr_task_warm_reset(struct iscsit_conn *, struct iscsi_tmr_req *, + unsigned char *); +extern int iscsit_tmr_task_cold_reset(struct iscsit_conn *, struct iscsi_tmr_req *, + unsigned char *); +extern u8 iscsit_tmr_task_reassign(struct iscsit_cmd *, unsigned char *); +extern int iscsit_tmr_post_handler(struct iscsit_cmd *, struct iscsit_conn *); +extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *, + struct iscsit_conn *); + +#endif /* ISCSI_TARGET_TMR_H */ diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c new file mode 100644 index 0000000000..f7bac98fd4 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains iSCSI Target Portal Group related functions. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_erl0.h" +#include "iscsi_target_login.h" +#include "iscsi_target_nodeattrib.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" +#include "iscsi_target_parameters.h" + +#include <target/iscsi/iscsi_transport.h> + +struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt) +{ + struct iscsi_portal_group *tpg; + + tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct iscsi_portal_group\n"); + return NULL; + } + + tpg->tpgt = tpgt; + tpg->tpg_state = TPG_STATE_FREE; + tpg->tpg_tiqn = tiqn; + INIT_LIST_HEAD(&tpg->tpg_gnp_list); + INIT_LIST_HEAD(&tpg->tpg_list); + mutex_init(&tpg->tpg_access_lock); + sema_init(&tpg->np_login_sem, 1); + spin_lock_init(&tpg->tpg_state_lock); + spin_lock_init(&tpg->tpg_np_lock); + + return tpg; +} + +static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *); + +int iscsit_load_discovery_tpg(void) +{ + struct iscsi_param *param; + struct iscsi_portal_group *tpg; + int ret; + + tpg = iscsit_alloc_portal_group(NULL, 1); + if (!tpg) { + pr_err("Unable to allocate struct iscsi_portal_group\n"); + return -1; + } + /* + * Save iscsi_ops pointer for special case discovery TPG that + * doesn't exist as se_wwn->wwn_group within configfs. 
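+	 *
+	 * Unlike a normal TPG, which userspace creates via configfs
+	 * (e.g. under /sys/kernel/config/target/iscsi/<TargetName>/tpgt_1/),
+	 * this TPG is built entirely in-kernel at module load, so the
+	 * fabric ops pointer has to be wired up by hand here.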
+ */ + tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops; + ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1); + if (ret < 0) { + kfree(tpg); + return -1; + } + + tpg->sid = 1; /* First Assigned LIO Session ID */ + iscsit_set_default_tpg_attribs(tpg); + + if (iscsi_create_default_params(&tpg->param_list) < 0) + goto out; + /* + * By default we disable authentication for discovery sessions, + * this can be changed with: + * + * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth + */ + param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); + if (!param) + goto free_pl_out; + + if (iscsi_update_param_value(param, "CHAP,None") < 0) + goto free_pl_out; + + tpg->tpg_attrib.authentication = 0; + + spin_lock(&tpg->tpg_state_lock); + tpg->tpg_state = TPG_STATE_ACTIVE; + spin_unlock(&tpg->tpg_state_lock); + + iscsit_global->discovery_tpg = tpg; + pr_debug("CORE[0] - Allocated Discovery TPG\n"); + + return 0; +free_pl_out: + iscsi_release_param_list(tpg->param_list); +out: + if (tpg->sid == 1) + core_tpg_deregister(&tpg->tpg_se_tpg); + kfree(tpg); + return -1; +} + +void iscsit_release_discovery_tpg(void) +{ + struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg; + + if (!tpg) + return; + + iscsi_release_param_list(tpg->param_list); + core_tpg_deregister(&tpg->tpg_se_tpg); + + kfree(tpg); + iscsit_global->discovery_tpg = NULL; +} + +struct iscsi_portal_group *iscsit_get_tpg_from_np( + struct iscsi_tiqn *tiqn, + struct iscsi_np *np, + struct iscsi_tpg_np **tpg_np_out) +{ + struct iscsi_portal_group *tpg = NULL; + struct iscsi_tpg_np *tpg_np; + + spin_lock(&tiqn->tiqn_tpg_lock); + list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + + spin_lock(&tpg->tpg_state_lock); + if (tpg->tpg_state != TPG_STATE_ACTIVE) { + spin_unlock(&tpg->tpg_state_lock); + continue; + } + spin_unlock(&tpg->tpg_state_lock); + + spin_lock(&tpg->tpg_np_lock); + list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { + if (tpg_np->tpg_np == np) { + *tpg_np_out = tpg_np; + kref_get(&tpg_np->tpg_np_kref); + spin_unlock(&tpg->tpg_np_lock); + spin_unlock(&tiqn->tiqn_tpg_lock); + return tpg; + } + } + spin_unlock(&tpg->tpg_np_lock); + } + spin_unlock(&tiqn->tiqn_tpg_lock); + + return NULL; +} + +int iscsit_get_tpg( + struct iscsi_portal_group *tpg) +{ + return mutex_lock_interruptible(&tpg->tpg_access_lock); +} + +void iscsit_put_tpg(struct iscsi_portal_group *tpg) +{ + mutex_unlock(&tpg->tpg_access_lock); +} + +static void iscsit_clear_tpg_np_login_thread( + struct iscsi_tpg_np *tpg_np, + struct iscsi_portal_group *tpg, + bool shutdown) +{ + if (!tpg_np->tpg_np) { + pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); + return; + } + + if (shutdown) + tpg_np->tpg_np->enabled = false; + iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); +} + +static void iscsit_clear_tpg_np_login_threads( + struct iscsi_portal_group *tpg, + bool shutdown) +{ + struct iscsi_tpg_np *tpg_np; + + spin_lock(&tpg->tpg_np_lock); + list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { + if (!tpg_np->tpg_np) { + pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); + continue; + } + spin_unlock(&tpg->tpg_np_lock); + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown); + spin_lock(&tpg->tpg_np_lock); + } + spin_unlock(&tpg->tpg_np_lock); +} + +void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg) +{ + iscsi_print_params(tpg->param_list); +} + +static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + a->authentication = 
TA_AUTHENTICATION;
+	a->login_timeout = TA_LOGIN_TIMEOUT;
+	a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
+	a->generate_node_acls = TA_GENERATE_NODE_ACLS;
+	a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
+	a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
+	a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+	a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
+	a->default_erl = TA_DEFAULT_ERL;
+	a->t10_pi = TA_DEFAULT_T10_PI;
+	a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
+	a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+	a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
+}
+
+int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
+{
+	if (tpg->tpg_state != TPG_STATE_FREE) {
+		pr_err("Unable to add iSCSI Target Portal Group: %d"
+			" while not in TPG_STATE_FREE state.\n", tpg->tpgt);
+		return -EEXIST;
+	}
+	iscsit_set_default_tpg_attribs(tpg);
+
+	if (iscsi_create_default_params(&tpg->param_list) < 0)
+		goto err_out;
+
+	tpg->tpg_attrib.tpg = tpg;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
+	tiqn->tiqn_ntpgs++;
+	pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
+			tiqn->tiqn, tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+err_out:
+	if (tpg->param_list) {
+		iscsi_release_param_list(tpg->param_list);
+		tpg->param_list = NULL;
+	}
+	return -ENOMEM;
+}
+
+int iscsit_tpg_del_portal_group(
+	struct iscsi_tiqn *tiqn,
+	struct iscsi_portal_group *tpg,
+	int force)
+{
+	u8 old_state = tpg->tpg_state;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+		pr_err("Unable to delete iSCSI Target Portal Group:"
+			" %hu while active sessions exist, and force=0\n",
+			tpg->tpgt);
+		tpg->tpg_state = old_state;
+		return -EPERM;
+	}
+
+	if (tpg->param_list) {
+		iscsi_release_param_list(tpg->param_list);
+		tpg->param_list = NULL;
+	}
+
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_FREE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_ntpgs--;
+	list_del(&tpg->tpg_list);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
+			tiqn->tiqn, tpg->tpgt);
+
+	kfree(tpg);
+	return 0;
+}
+
+int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
+{
+	struct iscsi_param *param;
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+	int ret;
+
+	if (tpg->tpg_state == TPG_STATE_ACTIVE) {
+		pr_err("iSCSI target portal group: %hu is already"
+			" active, ignoring request.\n", tpg->tpgt);
+		return -EINVAL;
+	}
+	/*
+	 * Make sure that AuthMethod does not contain None as an option
+	 * unless explicitly disabled.  Set the default to CHAP if
+	 * authentication is enforced (as per default), and remove the
+	 * NONE option.
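+	 *
+	 * For example, a TPG whose AuthMethod value is "CHAP,None" ends
+	 * up with "CHAP" after the iscsit_ta_authentication() call below
+	 * strips the None option.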
+ */ + param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); + if (!param) + return -EINVAL; + + if (tpg->tpg_attrib.authentication) { + if (!strcmp(param->value, NONE)) { + ret = iscsi_update_param_value(param, CHAP); + if (ret) + goto err; + } + + ret = iscsit_ta_authentication(tpg, 1); + if (ret < 0) + goto err; + } + + spin_lock(&tpg->tpg_state_lock); + tpg->tpg_state = TPG_STATE_ACTIVE; + spin_unlock(&tpg->tpg_state_lock); + + spin_lock(&tiqn->tiqn_tpg_lock); + tiqn->tiqn_active_tpgs++; + pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n", + tpg->tpgt); + spin_unlock(&tiqn->tiqn_tpg_lock); + + return 0; + +err: + return ret; +} + +int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force) +{ + struct iscsi_tiqn *tiqn; + u8 old_state = tpg->tpg_state; + + spin_lock(&tpg->tpg_state_lock); + if (tpg->tpg_state == TPG_STATE_INACTIVE) { + pr_err("iSCSI Target Portal Group: %hu is already" + " inactive, ignoring request.\n", tpg->tpgt); + spin_unlock(&tpg->tpg_state_lock); + return -EINVAL; + } + tpg->tpg_state = TPG_STATE_INACTIVE; + spin_unlock(&tpg->tpg_state_lock); + + iscsit_clear_tpg_np_login_threads(tpg, false); + + if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { + spin_lock(&tpg->tpg_state_lock); + tpg->tpg_state = old_state; + spin_unlock(&tpg->tpg_state_lock); + pr_err("Unable to disable iSCSI Target Portal Group:" + " %hu while active sessions exist, and force=0\n", + tpg->tpgt); + return -EPERM; + } + + tiqn = tpg->tpg_tiqn; + if (!tiqn || (tpg == iscsit_global->discovery_tpg)) + return 0; + + spin_lock(&tiqn->tiqn_tpg_lock); + tiqn->tiqn_active_tpgs--; + pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n", + tpg->tpgt); + spin_unlock(&tiqn->tiqn_tpg_lock); + + return 0; +} + +struct iscsi_node_attrib *iscsit_tpg_get_node_attrib( + struct iscsit_session *sess) +{ + struct se_session *se_sess = sess->se_sess; + struct se_node_acl *se_nacl = se_sess->se_node_acl; + struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl); + + return &acl->node_attrib; +} + +struct iscsi_tpg_np *iscsit_tpg_locate_child_np( + struct iscsi_tpg_np *tpg_np, + int network_transport) +{ + struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp; + + spin_lock(&tpg_np->tpg_np_parent_lock); + list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp, + &tpg_np->tpg_np_parent_list, tpg_np_child_list) { + if (tpg_np_child->tpg_np->np_network_transport == + network_transport) { + spin_unlock(&tpg_np->tpg_np_parent_lock); + return tpg_np_child; + } + } + spin_unlock(&tpg_np->tpg_np_parent_lock); + + return NULL; +} + +static bool iscsit_tpg_check_network_portal( + struct iscsi_tiqn *tiqn, + struct sockaddr_storage *sockaddr, + int network_transport) +{ + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np; + struct iscsi_np *np; + bool match = false; + + spin_lock(&tiqn->tiqn_tpg_lock); + list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + + spin_lock(&tpg->tpg_np_lock); + list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { + np = tpg_np->tpg_np; + + match = iscsit_check_np_match(sockaddr, np, + network_transport); + if (match) + break; + } + spin_unlock(&tpg->tpg_np_lock); + + if (match) + break; + } + spin_unlock(&tiqn->tiqn_tpg_lock); + + return match; +} + +struct iscsi_tpg_np *iscsit_tpg_add_network_portal( + struct iscsi_portal_group *tpg, + struct sockaddr_storage *sockaddr, + struct iscsi_tpg_np *tpg_np_parent, + int network_transport) +{ + struct iscsi_np *np; + struct iscsi_tpg_np *tpg_np; + + if (!tpg_np_parent) { + if 
(iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, + network_transport)) { + pr_err("Network Portal: %pISc already exists on a" + " different TPG on %s\n", sockaddr, + tpg->tpg_tiqn->tiqn); + return ERR_PTR(-EEXIST); + } + } + + tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL); + if (!tpg_np) { + pr_err("Unable to allocate memory for" + " struct iscsi_tpg_np.\n"); + return ERR_PTR(-ENOMEM); + } + + np = iscsit_add_np(sockaddr, network_transport); + if (IS_ERR(np)) { + kfree(tpg_np); + return ERR_CAST(np); + } + + INIT_LIST_HEAD(&tpg_np->tpg_np_list); + INIT_LIST_HEAD(&tpg_np->tpg_np_child_list); + INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list); + spin_lock_init(&tpg_np->tpg_np_parent_lock); + init_completion(&tpg_np->tpg_np_comp); + kref_init(&tpg_np->tpg_np_kref); + tpg_np->tpg_np = np; + tpg_np->tpg = tpg; + + spin_lock(&tpg->tpg_np_lock); + list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list); + tpg->num_tpg_nps++; + if (tpg->tpg_tiqn) + tpg->tpg_tiqn->tiqn_num_tpg_nps++; + spin_unlock(&tpg->tpg_np_lock); + + if (tpg_np_parent) { + tpg_np->tpg_np_parent = tpg_np_parent; + spin_lock(&tpg_np_parent->tpg_np_parent_lock); + list_add_tail(&tpg_np->tpg_np_child_list, + &tpg_np_parent->tpg_np_parent_list); + spin_unlock(&tpg_np_parent->tpg_np_parent_lock); + } + + pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n", + tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt, + np->np_transport->name); + + return tpg_np; +} + +static int iscsit_tpg_release_np( + struct iscsi_tpg_np *tpg_np, + struct iscsi_portal_group *tpg, + struct iscsi_np *np) +{ + iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true); + + pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n", + tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt, + np->np_transport->name); + + tpg_np->tpg_np = NULL; + tpg_np->tpg = NULL; + kfree(tpg_np); + /* + * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released. + */ + return iscsit_del_np(np); +} + +int iscsit_tpg_del_network_portal( + struct iscsi_portal_group *tpg, + struct iscsi_tpg_np *tpg_np) +{ + struct iscsi_np *np; + struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp; + int ret = 0; + + np = tpg_np->tpg_np; + if (!np) { + pr_err("Unable to locate struct iscsi_np from" + " struct iscsi_tpg_np\n"); + return -EINVAL; + } + + if (!tpg_np->tpg_np_parent) { + /* + * We are the parent tpg network portal. Release all of the + * child tpg_np's (eg: the non ISCSI_TCP ones) on our parent + * list first. + */ + list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp, + &tpg_np->tpg_np_parent_list, + tpg_np_child_list) { + ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child); + if (ret < 0) + pr_err("iscsit_tpg_del_network_portal()" + " failed: %d\n", ret); + } + } else { + /* + * We are not the parent ISCSI_TCP tpg network portal. Release + * our own network portals from the child list. 
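+		 *
+		 * Hypothetical topology: a TCP portal on 0.0.0.0:3260 is the
+		 * parent tpg_np, with e.g. an iSER portal on the same address
+		 * hanging off its tpg_np_parent_list; deleting the parent in
+		 * the branch above removes such children first, while this
+		 * branch only unlinks one child from its parent's list.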
+		 */
+		spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+		list_del(&tpg_np->tpg_np_child_list);
+		spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+	}
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_del(&tpg_np->tpg_np_list);
+	tpg->num_tpg_nps--;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_num_tpg_nps--;
+	spin_unlock(&tpg->tpg_np_lock);
+
+	return iscsit_tpg_release_np(tpg_np, tpg, np);
+}
+
+int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
+{
+	unsigned char buf1[256], buf2[256], *none = NULL;
+	int len;
+	struct iscsi_param *param;
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((authentication != 1) && (authentication != 0)) {
+		pr_err("Illegal value for authentication parameter:"
+			" %u, ignoring request.\n", authentication);
+		return -EINVAL;
+	}
+
+	memset(buf1, 0, sizeof(buf1));
+	memset(buf2, 0, sizeof(buf2));
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param)
+		return -EINVAL;
+
+	if (authentication) {
+		snprintf(buf1, sizeof(buf1), "%s", param->value);
+		none = strstr(buf1, NONE);
+		if (!none)
+			goto out;
+		if (!strncmp(none + 4, ",", 1)) {
+			if (!strcmp(buf1, none))
+				sprintf(buf2, "%s", none+5);
+			else {
+				none--;
+				*none = '\0';
+				len = sprintf(buf2, "%s", buf1);
+				none += 5;
+				sprintf(buf2 + len, "%s", none);
+			}
+		} else {
+			none--;
+			*none = '\0';
+			sprintf(buf2, "%s", buf1);
+		}
+		if (iscsi_update_param_value(param, buf2) < 0)
+			return -EINVAL;
+	} else {
+		snprintf(buf1, sizeof(buf1), "%s", param->value);
+		none = strstr(buf1, NONE);
+		if (none)
+			goto out;
+		strlcat(buf1, "," NONE, sizeof(buf1));
+		if (iscsi_update_param_value(param, buf1) < 0)
+			return -EINVAL;
+	}
+
+out:
+	a->authentication = authentication;
+	pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
+		a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_login_timeout(
+	struct iscsi_portal_group *tpg,
+	u32 login_timeout)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
+		pr_err("Requested Login Timeout %u larger than maximum"
+			" %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
+		pr_err("Requested Login Timeout %u smaller than"
+			" minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->login_timeout = login_timeout;
+	pr_debug("Set Login Timeout to %u for Target Portal Group"
+		" %hu\n", a->login_timeout, tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_generate_node_acls(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->generate_node_acls = flag;
+	pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+		tpg->tpgt, (a->generate_node_acls) ?
"Enabled" : "Disabled"); + + if (flag == 1 && a->cache_dynamic_acls == 0) { + pr_debug("Explicitly setting cache_dynamic_acls=1 when " + "generate_node_acls=1\n"); + a->cache_dynamic_acls = 1; + } + + return 0; +} + +int iscsit_ta_default_cmdsn_depth( + struct iscsi_portal_group *tpg, + u32 tcq_depth) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) { + pr_err("Requested Default Queue Depth: %u larger" + " than maximum %u\n", tcq_depth, + TA_DEFAULT_CMDSN_DEPTH_MAX); + return -EINVAL; + } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) { + pr_err("Requested Default Queue Depth: %u smaller" + " than minimum %u\n", tcq_depth, + TA_DEFAULT_CMDSN_DEPTH_MIN); + return -EINVAL; + } + + a->default_cmdsn_depth = tcq_depth; + pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n", + tpg->tpgt, a->default_cmdsn_depth); + + return 0; +} + +int iscsit_ta_cache_dynamic_acls( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + if (a->generate_node_acls == 1 && flag == 0) { + pr_debug("Skipping cache_dynamic_acls=0 when" + " generate_node_acls=1\n"); + return 0; + } + + a->cache_dynamic_acls = flag; + pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group" + " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ? + "Enabled" : "Disabled"); + + return 0; +} + +int iscsit_ta_demo_mode_write_protect( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->demo_mode_write_protect = flag; + pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n", + tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_prod_mode_write_protect( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->prod_mode_write_protect = flag; + pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:" + " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ? + "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_demo_mode_discovery( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->demo_mode_discovery = flag; + pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:" + " %s\n", tpg->tpgt, (a->demo_mode_discovery) ? + "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_default_erl( + struct iscsi_portal_group *tpg, + u32 default_erl) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) { + pr_err("Illegal value for default_erl: %u\n", default_erl); + return -EINVAL; + } + + a->default_erl = default_erl; + pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl); + + return 0; +} + +int iscsit_ta_t10_pi( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->t10_pi = flag; + pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:" + " %s\n", tpg->tpgt, (a->t10_pi) ? 
+ "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_fabric_prot_type( + struct iscsi_portal_group *tpg, + u32 prot_type) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) { + pr_err("Illegal value for fabric_prot_type: %u\n", prot_type); + return -EINVAL; + } + + a->fabric_prot_type = prot_type; + pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n", + tpg->tpgt, prot_type); + + return 0; +} + +int iscsit_ta_tpg_enabled_sendtargets( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->tpg_enabled_sendtargets = flag; + pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:" + " %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_login_keys_workaround( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->login_keys_workaround = flag; + pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ", + tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF"); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h new file mode 100644 index 0000000000..71d067f621 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_TPG_H +#define ISCSI_TARGET_TPG_H + +#include <linux/types.h> + +struct iscsi_np; +struct iscsit_session; +struct iscsi_tiqn; +struct iscsi_tpg_np; +struct se_node_acl; +struct sockaddr_storage; + +extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16); +extern int iscsit_load_discovery_tpg(void); +extern void iscsit_release_discovery_tpg(void); +extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, + struct iscsi_np *, struct iscsi_tpg_np **); +extern int iscsit_get_tpg(struct iscsi_portal_group *); +extern void iscsit_put_tpg(struct iscsi_portal_group *); +extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); +extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); +extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, + int); +extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *); +extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int); +extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl( + struct iscsi_portal_group *, const char *, u32); +extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *, + struct se_node_acl *); +extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *); +extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *); +extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int); +extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *, + struct sockaddr_storage *, struct iscsi_tpg_np *, + int); +extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *, + struct iscsi_tpg_np *); +extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32); +extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32); +extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, 
u32); +extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32); +extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); +extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); +extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); +extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); +extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); +extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); +extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); +extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); +extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32); + +#endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c new file mode 100644 index 0000000000..27c85f2604 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_transport.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/module.h> +#include <target/iscsi/iscsi_transport.h> + +static LIST_HEAD(g_transport_list); +static DEFINE_MUTEX(transport_mutex); + +struct iscsit_transport *iscsit_get_transport(int type) +{ + struct iscsit_transport *t; + + mutex_lock(&transport_mutex); + list_for_each_entry(t, &g_transport_list, t_node) { + if (t->transport_type == type) { + if (t->owner && !try_module_get(t->owner)) { + t = NULL; + } + mutex_unlock(&transport_mutex); + return t; + } + } + mutex_unlock(&transport_mutex); + + return NULL; +} + +void iscsit_put_transport(struct iscsit_transport *t) +{ + module_put(t->owner); +} + +void iscsit_register_transport(struct iscsit_transport *t) +{ + INIT_LIST_HEAD(&t->t_node); + + mutex_lock(&transport_mutex); + list_add_tail(&t->t_node, &g_transport_list); + mutex_unlock(&transport_mutex); + + pr_debug("Registered iSCSI transport: %s\n", t->name); +} +EXPORT_SYMBOL(iscsit_register_transport); + +void iscsit_unregister_transport(struct iscsit_transport *t) +{ + mutex_lock(&transport_mutex); + list_del(&t->t_node); + mutex_unlock(&transport_mutex); + + pr_debug("Unregistered iSCSI transport: %s\n", t->name); +} +EXPORT_SYMBOL(iscsit_unregister_transport); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c new file mode 100644 index 0000000000..91a75a4a7c --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -0,0 +1,1425 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * This file contains the iSCSI Target specific utility functions. + * + * (c) Copyright 2007-2013 Datera, Inc. + * + * Author: Nicholas A. 
Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/list.h> +#include <linux/sched/signal.h> +#include <net/ipv6.h> /* ipv6_addr_equal() */ +#include <scsi/scsi_tcq.h> +#include <scsi/iscsi_proto.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> + +#include <target/iscsi/iscsi_target_core.h> +#include "iscsi_target_parameters.h" +#include "iscsi_target_seq_pdu_list.h" +#include "iscsi_target_datain_values.h" +#include "iscsi_target_erl0.h" +#include "iscsi_target_erl1.h" +#include "iscsi_target_erl2.h" +#include "iscsi_target_tpg.h" +#include "iscsi_target_util.h" +#include "iscsi_target.h" + +extern struct list_head g_tiqn_list; +extern spinlock_t tiqn_lock; + +int iscsit_add_r2t_to_list( + struct iscsit_cmd *cmd, + u32 offset, + u32 xfer_len, + int recovery, + u32 r2t_sn) +{ + struct iscsi_r2t *r2t; + + lockdep_assert_held(&cmd->r2t_lock); + + WARN_ON_ONCE((s32)xfer_len < 0); + + r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC); + if (!r2t) { + pr_err("Unable to allocate memory for struct iscsi_r2t.\n"); + return -1; + } + INIT_LIST_HEAD(&r2t->r2t_list); + + r2t->recovery_r2t = recovery; + r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn; + r2t->offset = offset; + r2t->xfer_len = xfer_len; + list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list); + spin_unlock_bh(&cmd->r2t_lock); + + iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T); + + spin_lock_bh(&cmd->r2t_lock); + return 0; +} + +struct iscsi_r2t *iscsit_get_r2t_for_eos( + struct iscsit_cmd *cmd, + u32 offset, + u32 length) +{ + struct iscsi_r2t *r2t; + + spin_lock_bh(&cmd->r2t_lock); + list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { + if ((r2t->offset <= offset) && + (r2t->offset + r2t->xfer_len) >= (offset + length)) { + spin_unlock_bh(&cmd->r2t_lock); + return r2t; + } + } + spin_unlock_bh(&cmd->r2t_lock); + + pr_err("Unable to locate R2T for Offset: %u, Length:" + " %u\n", offset, length); + return NULL; +} + +struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd) +{ + struct iscsi_r2t *r2t; + + spin_lock_bh(&cmd->r2t_lock); + list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { + if (!r2t->sent_r2t) { + spin_unlock_bh(&cmd->r2t_lock); + return r2t; + } + } + spin_unlock_bh(&cmd->r2t_lock); + + pr_err("Unable to locate next R2T to send for ITT:" + " 0x%08x.\n", cmd->init_task_tag); + return NULL; +} + +void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd) +{ + lockdep_assert_held(&cmd->r2t_lock); + + list_del(&r2t->r2t_list); + kmem_cache_free(lio_r2t_cache, r2t); +} + +void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd) +{ + struct iscsi_r2t *r2t, *r2t_tmp; + + spin_lock_bh(&cmd->r2t_lock); + list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) + iscsit_free_r2t(r2t, cmd); + spin_unlock_bh(&cmd->r2t_lock); +} + +static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup) +{ + int tag = -1; + DEFINE_SBQ_WAIT(wait); + struct sbq_wait_state *ws; + struct sbitmap_queue *sbq; + + if (state == TASK_RUNNING) + return tag; + + sbq = &se_sess->sess_tag_pool; + ws = &sbq->ws[0]; + for (;;) { + sbitmap_prepare_to_wait(sbq, ws, &wait, state); + if (signal_pending_state(state, current)) + break; + tag = sbitmap_queue_get(sbq, cpup); + if (tag >= 0) + break; + schedule(); + } + + sbitmap_finish_wait(sbq, ws, &wait); + return tag; +} + +/* + * May be called from software interrupt (timer) 
context for allocating
+ * iSCSI NopINs.
+ */
+struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
+{
+	struct iscsit_cmd *cmd;
+	struct se_session *se_sess = conn->sess->se_sess;
+	int size, tag, cpu;
+
+	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+	if (tag < 0)
+		tag = iscsit_wait_for_tag(se_sess, state, &cpu);
+	if (tag < 0)
+		return NULL;
+
+	size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
+	cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
+	memset(cmd, 0, size);
+
+	cmd->se_cmd.map_tag = tag;
+	cmd->se_cmd.map_cpu = cpu;
+	cmd->conn = conn;
+	cmd->data_direction = DMA_NONE;
+	INIT_LIST_HEAD(&cmd->i_conn_node);
+	INIT_LIST_HEAD(&cmd->datain_list);
+	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
+	spin_lock_init(&cmd->datain_lock);
+	spin_lock_init(&cmd->dataout_timeout_lock);
+	spin_lock_init(&cmd->istate_lock);
+	spin_lock_init(&cmd->error_lock);
+	spin_lock_init(&cmd->r2t_lock);
+	timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);
+
+	return cmd;
+}
+EXPORT_SYMBOL(iscsit_allocate_cmd);
+
+struct iscsi_seq *iscsit_get_seq_holder_for_datain(
+	struct iscsit_cmd *cmd,
+	u32 seq_send_order)
+{
+	u32 i;
+
+	for (i = 0; i < cmd->seq_count; i++)
+		if (cmd->seq_list[i].seq_send_order == seq_send_order)
+			return &cmd->seq_list[i];
+
+	return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
+{
+	u32 i;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsit_cmd->seq_list is NULL!\n");
+		return NULL;
+	}
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
+			cmd->seq_send_order++;
+			return &cmd->seq_list[i];
+		}
+	}
+
+	return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
+	struct iscsit_cmd *cmd,
+	u32 r2t_sn)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (r2t->r2t_sn == r2t_sn) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return NULL;
+}
+
+static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
+{
+	u32 max_cmdsn;
+	int ret;
+
+	/*
+	 * This is the proper method of checking received CmdSN against
+	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
+	 * of order CmdSNs due to multiple connection sessions and/or
+	 * CRC failures.
+	 */
+	max_cmdsn = atomic_read(&sess->max_cmd_sn);
+	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
+		pr_err("Received CmdSN: 0x%08x is greater than"
+		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
+		ret = CMDSN_MAXCMDSN_OVERRUN;
+
+	} else if (cmdsn == sess->exp_cmd_sn) {
+		sess->exp_cmd_sn++;
+		pr_debug("Received CmdSN matches ExpCmdSN,"
+			" incremented ExpCmdSN to: 0x%08x\n",
+			sess->exp_cmd_sn);
+		ret = CMDSN_NORMAL_OPERATION;
+
+	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
+		pr_debug("Received CmdSN: 0x%08x is greater"
+			" than ExpCmdSN: 0x%08x, not acknowledging.\n",
+			cmdsn, sess->exp_cmd_sn);
+		ret = CMDSN_HIGHER_THAN_EXP;
+
+	} else {
+		pr_err("Received CmdSN: 0x%08x is less than"
+		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
+		       sess->exp_cmd_sn);
+		ret = CMDSN_LOWER_THAN_EXP;
+	}
+
+	return ret;
+}
+
+/*
+ * Commands may be received out of order if MC/S is in use.
+ * Ensure they are executed in CmdSN order.
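+ *
+ * Hypothetical window example for the check above: with ExpCmdSN=10
+ * and MaxCmdSN=20, CmdSN 10 is executed immediately, 11..20 are
+ * queued as CMDSN_HIGHER_THAN_EXP for later in-order execution,
+ * 21 and up overrun MaxCmdSN, and 9 or below is stale and dropped.
+ * iscsi_sna_gt() compares in RFC 1982 serial number arithmetic, so
+ * the window stays correct across 32-bit CmdSN wraparound.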
+ */ +int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char *buf, __be32 cmdsn) +{ + int ret, cmdsn_ret; + bool reject = false; + u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES; + + mutex_lock(&conn->sess->cmdsn_mutex); + + cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn)); + switch (cmdsn_ret) { + case CMDSN_NORMAL_OPERATION: + ret = iscsit_execute_cmd(cmd, 0); + if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list)) + iscsit_execute_ooo_cmdsns(conn->sess); + else if (ret < 0) { + reject = true; + ret = CMDSN_ERROR_CANNOT_RECOVER; + } + break; + case CMDSN_HIGHER_THAN_EXP: + ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn)); + if (ret < 0) { + reject = true; + ret = CMDSN_ERROR_CANNOT_RECOVER; + break; + } + ret = CMDSN_HIGHER_THAN_EXP; + break; + case CMDSN_LOWER_THAN_EXP: + case CMDSN_MAXCMDSN_OVERRUN: + default: + cmd->i_state = ISTATE_REMOVE; + iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); + /* + * Existing callers for iscsit_sequence_cmd() will silently + * ignore commands with CMDSN_LOWER_THAN_EXP, so force this + * return for CMDSN_MAXCMDSN_OVERRUN as well.. + */ + ret = CMDSN_LOWER_THAN_EXP; + break; + } + mutex_unlock(&conn->sess->cmdsn_mutex); + + if (reject) + iscsit_reject_cmd(cmd, reason, buf); + + return ret; +} +EXPORT_SYMBOL(iscsit_sequence_cmd); + +int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf) +{ + struct iscsit_conn *conn = cmd->conn; + struct se_cmd *se_cmd = &cmd->se_cmd; + struct iscsi_data *hdr = (struct iscsi_data *) buf; + u32 payload_length = ntoh24(hdr->dlength); + + if (conn->sess->sess_ops->InitialR2T) { + pr_err("Received unexpected unsolicited data" + " while InitialR2T=Yes, protocol error.\n"); + transport_send_check_condition_and_sense(se_cmd, + TCM_UNEXPECTED_UNSOLICITED_DATA, 0); + return -1; + } + + if ((cmd->first_burst_len + payload_length) > + conn->sess->sess_ops->FirstBurstLength) { + pr_err("Total %u bytes exceeds FirstBurstLength: %u" + " for this Unsolicited DataOut Burst.\n", + (cmd->first_burst_len + payload_length), + conn->sess->sess_ops->FirstBurstLength); + transport_send_check_condition_and_sense(se_cmd, + TCM_INCORRECT_AMOUNT_OF_DATA, 0); + return -1; + } + + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) + return 0; + + if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) && + ((cmd->first_burst_len + payload_length) != + conn->sess->sess_ops->FirstBurstLength)) { + pr_err("Unsolicited non-immediate data received %u" + " does not equal FirstBurstLength: %u, and does" + " not equal ExpXferLen %u.\n", + (cmd->first_burst_len + payload_length), + conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length); + transport_send_check_condition_and_sense(se_cmd, + TCM_INCORRECT_AMOUNT_OF_DATA, 0); + return -1; + } + return 0; +} + +struct iscsit_cmd *iscsit_find_cmd_from_itt( + struct iscsit_conn *conn, + itt_t init_task_tag) +{ + struct iscsit_cmd *cmd; + + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { + if (cmd->init_task_tag == init_task_tag) { + spin_unlock_bh(&conn->cmd_lock); + return cmd; + } + } + spin_unlock_bh(&conn->cmd_lock); + + pr_err("Unable to locate ITT: 0x%08x on CID: %hu", + init_task_tag, conn->cid); + return NULL; +} +EXPORT_SYMBOL(iscsit_find_cmd_from_itt); + +struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump( + struct iscsit_conn *conn, + itt_t init_task_tag, + u32 length) +{ + struct iscsit_cmd *cmd; + + 
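+	/*
+	 * Skip any command for which the final DataOUT PDU was already
+	 * received (the ICF_GOT_LAST_DATAOUT check below); on a failed
+	 * lookup the remaining payload bytes are dumped instead of being
+	 * matched to a command.
+	 */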
spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
+			continue;
+		if (cmd->init_task_tag == init_task_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
+			" dumping payload\n", init_task_tag, conn->cid);
+	if (length)
+		iscsit_dump_data_payload(conn, length, 1);
+
+	return NULL;
+}
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
+
+struct iscsit_cmd *iscsit_find_cmd_from_ttt(
+	struct iscsit_conn *conn,
+	u32 targ_xfer_tag)
+{
+	struct iscsit_cmd *cmd = NULL;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+		if (cmd->targ_xfer_tag == targ_xfer_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
+		targ_xfer_tag, conn->cid);
+	return NULL;
+}
+
+int iscsit_find_cmd_for_recovery(
+	struct iscsit_session *sess,
+	struct iscsit_cmd **cmd_ptr,
+	struct iscsi_conn_recovery **cr_ptr,
+	itt_t init_task_tag)
+{
+	struct iscsit_cmd *cmd = NULL;
+	struct iscsi_conn_recovery *cr;
+	/*
+	 * Scan through the inactive connection recovery list's command list.
+	 * If init_task_tag matches, the command is still allegiant to its
+	 * failed (inactive) connection recovery entry.
+	 */
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
+			if (cmd->init_task_tag == init_task_tag) {
+				spin_unlock(&cr->conn_recovery_cmd_lock);
+				spin_unlock(&sess->cr_i_lock);
+
+				*cr_ptr = cr;
+				*cmd_ptr = cmd;
+				return -2;
+			}
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&sess->cr_i_lock);
+	/*
+	 * Scan through the active connection recovery list's command list.
+	 * If init_task_tag matches, the command is ready to be reassigned.
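+	 *
+	 * Return convention: -2 means the command was found on an
+	 * *inactive* recovery entry, 0 means it was found on an *active*
+	 * entry and is ready for reassignment, and -1 means no matching
+	 * ITT was found at all.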
+ */ + spin_lock(&sess->cr_a_lock); + list_for_each_entry(cr, &sess->cr_active_list, cr_list) { + spin_lock(&cr->conn_recovery_cmd_lock); + list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) { + if (cmd->init_task_tag == init_task_tag) { + spin_unlock(&cr->conn_recovery_cmd_lock); + spin_unlock(&sess->cr_a_lock); + + *cr_ptr = cr; + *cmd_ptr = cmd; + return 0; + } + } + spin_unlock(&cr->conn_recovery_cmd_lock); + } + spin_unlock(&sess->cr_a_lock); + + return -1; +} + +void iscsit_add_cmd_to_immediate_queue( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn, + u8 state) +{ + struct iscsi_queue_req *qr; + + qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC); + if (!qr) { + pr_err("Unable to allocate memory for" + " struct iscsi_queue_req\n"); + return; + } + INIT_LIST_HEAD(&qr->qr_list); + qr->cmd = cmd; + qr->state = state; + + spin_lock_bh(&conn->immed_queue_lock); + list_add_tail(&qr->qr_list, &conn->immed_queue_list); + atomic_inc(&cmd->immed_queue_count); + atomic_set(&conn->check_immediate_queue, 1); + spin_unlock_bh(&conn->immed_queue_lock); + + wake_up(&conn->queues_wq); +} +EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue); + +struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn) +{ + struct iscsi_queue_req *qr; + + spin_lock_bh(&conn->immed_queue_lock); + if (list_empty(&conn->immed_queue_list)) { + spin_unlock_bh(&conn->immed_queue_lock); + return NULL; + } + qr = list_first_entry(&conn->immed_queue_list, + struct iscsi_queue_req, qr_list); + + list_del(&qr->qr_list); + if (qr->cmd) + atomic_dec(&qr->cmd->immed_queue_count); + spin_unlock_bh(&conn->immed_queue_lock); + + return qr; +} + +static void iscsit_remove_cmd_from_immediate_queue( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsi_queue_req *qr, *qr_tmp; + + spin_lock_bh(&conn->immed_queue_lock); + if (!atomic_read(&cmd->immed_queue_count)) { + spin_unlock_bh(&conn->immed_queue_lock); + return; + } + + list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) { + if (qr->cmd != cmd) + continue; + + atomic_dec(&qr->cmd->immed_queue_count); + list_del(&qr->qr_list); + kmem_cache_free(lio_qr_cache, qr); + } + spin_unlock_bh(&conn->immed_queue_lock); + + if (atomic_read(&cmd->immed_queue_count)) { + pr_err("ITT: 0x%08x immed_queue_count: %d\n", + cmd->init_task_tag, + atomic_read(&cmd->immed_queue_count)); + } +} + +int iscsit_add_cmd_to_response_queue( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn, + u8 state) +{ + struct iscsi_queue_req *qr; + + qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC); + if (!qr) { + pr_err("Unable to allocate memory for" + " struct iscsi_queue_req\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&qr->qr_list); + qr->cmd = cmd; + qr->state = state; + + spin_lock_bh(&conn->response_queue_lock); + list_add_tail(&qr->qr_list, &conn->response_queue_list); + atomic_inc(&cmd->response_queue_count); + spin_unlock_bh(&conn->response_queue_lock); + + wake_up(&conn->queues_wq); + return 0; +} + +struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn) +{ + struct iscsi_queue_req *qr; + + spin_lock_bh(&conn->response_queue_lock); + if (list_empty(&conn->response_queue_list)) { + spin_unlock_bh(&conn->response_queue_lock); + return NULL; + } + + qr = list_first_entry(&conn->response_queue_list, + struct iscsi_queue_req, qr_list); + + list_del(&qr->qr_list); + if (qr->cmd) + atomic_dec(&qr->cmd->response_queue_count); + spin_unlock_bh(&conn->response_queue_lock); + + return qr; +} + +static void 
iscsit_remove_cmd_from_response_queue( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct iscsi_queue_req *qr, *qr_tmp; + + spin_lock_bh(&conn->response_queue_lock); + if (!atomic_read(&cmd->response_queue_count)) { + spin_unlock_bh(&conn->response_queue_lock); + return; + } + + list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list, + qr_list) { + if (qr->cmd != cmd) + continue; + + atomic_dec(&qr->cmd->response_queue_count); + list_del(&qr->qr_list); + kmem_cache_free(lio_qr_cache, qr); + } + spin_unlock_bh(&conn->response_queue_lock); + + if (atomic_read(&cmd->response_queue_count)) { + pr_err("ITT: 0x%08x response_queue_count: %d\n", + cmd->init_task_tag, + atomic_read(&cmd->response_queue_count)); + } +} + +bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn) +{ + bool empty; + + spin_lock_bh(&conn->immed_queue_lock); + empty = list_empty(&conn->immed_queue_list); + spin_unlock_bh(&conn->immed_queue_lock); + + if (!empty) + return empty; + + spin_lock_bh(&conn->response_queue_lock); + empty = list_empty(&conn->response_queue_list); + spin_unlock_bh(&conn->response_queue_lock); + + return empty; +} + +void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn) +{ + struct iscsi_queue_req *qr, *qr_tmp; + + spin_lock_bh(&conn->immed_queue_lock); + list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) { + list_del(&qr->qr_list); + if (qr->cmd) + atomic_dec(&qr->cmd->immed_queue_count); + + kmem_cache_free(lio_qr_cache, qr); + } + spin_unlock_bh(&conn->immed_queue_lock); + + spin_lock_bh(&conn->response_queue_lock); + list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list, + qr_list) { + list_del(&qr->qr_list); + if (qr->cmd) + atomic_dec(&qr->cmd->response_queue_count); + + kmem_cache_free(lio_qr_cache, qr); + } + spin_unlock_bh(&conn->response_queue_lock); +} + +void iscsit_release_cmd(struct iscsit_cmd *cmd) +{ + struct iscsit_session *sess; + struct se_cmd *se_cmd = &cmd->se_cmd; + + WARN_ON(!list_empty(&cmd->i_conn_node)); + + if (cmd->conn) + sess = cmd->conn->sess; + else + sess = cmd->sess; + + BUG_ON(!sess || !sess->se_sess); + + kfree(cmd->buf_ptr); + kfree(cmd->pdu_list); + kfree(cmd->seq_list); + kfree(cmd->tmr_req); + kfree(cmd->overflow_buf); + kfree(cmd->iov_data); + kfree(cmd->text_in_ptr); + + target_free_tag(sess->se_sess, se_cmd); +} +EXPORT_SYMBOL(iscsit_release_cmd); + +void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues) +{ + struct iscsit_conn *conn = cmd->conn; + + WARN_ON(!list_empty(&cmd->i_conn_node)); + + if (cmd->data_direction == DMA_TO_DEVICE) { + iscsit_stop_dataout_timer(cmd); + iscsit_free_r2ts_from_list(cmd); + } + if (cmd->data_direction == DMA_FROM_DEVICE) + iscsit_free_all_datain_reqs(cmd); + + if (conn && check_queues) { + iscsit_remove_cmd_from_immediate_queue(cmd, conn); + iscsit_remove_cmd_from_response_queue(cmd, conn); + } + + if (conn && conn->conn_transport->iscsit_unmap_cmd) + conn->conn_transport->iscsit_unmap_cmd(conn, cmd); +} + +void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown) +{ + struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? 
&cmd->se_cmd : NULL; + int rc; + + WARN_ON(!list_empty(&cmd->i_conn_node)); + + __iscsit_free_cmd(cmd, shutdown); + if (se_cmd) { + rc = transport_generic_free_cmd(se_cmd, shutdown); + if (!rc && shutdown && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, shutdown); + target_put_sess_cmd(se_cmd); + } + } else { + iscsit_release_cmd(cmd); + } +} +EXPORT_SYMBOL(iscsit_free_cmd); + +bool iscsit_check_session_usage_count(struct iscsit_session *sess, + bool can_sleep) +{ + spin_lock_bh(&sess->session_usage_lock); + if (sess->session_usage_count != 0) { + sess->session_waiting_on_uc = 1; + spin_unlock_bh(&sess->session_usage_lock); + if (!can_sleep) + return true; + + wait_for_completion(&sess->session_waiting_on_uc_comp); + return false; + } + spin_unlock_bh(&sess->session_usage_lock); + + return false; +} + +void iscsit_dec_session_usage_count(struct iscsit_session *sess) +{ + spin_lock_bh(&sess->session_usage_lock); + sess->session_usage_count--; + + if (!sess->session_usage_count && sess->session_waiting_on_uc) + complete(&sess->session_waiting_on_uc_comp); + + spin_unlock_bh(&sess->session_usage_lock); +} + +void iscsit_inc_session_usage_count(struct iscsit_session *sess) +{ + spin_lock_bh(&sess->session_usage_lock); + sess->session_usage_count++; + spin_unlock_bh(&sess->session_usage_lock); +} + +struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid) +{ + struct iscsit_conn *conn; + + spin_lock_bh(&sess->conn_lock); + list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { + if ((conn->cid == cid) && + (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) { + iscsit_inc_conn_usage_count(conn); + spin_unlock_bh(&sess->conn_lock); + return conn; + } + } + spin_unlock_bh(&sess->conn_lock); + + return NULL; +} + +struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid) +{ + struct iscsit_conn *conn; + + spin_lock_bh(&sess->conn_lock); + list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { + if (conn->cid == cid) { + iscsit_inc_conn_usage_count(conn); + spin_lock(&conn->state_lock); + atomic_set(&conn->connection_wait_rcfr, 1); + spin_unlock(&conn->state_lock); + spin_unlock_bh(&sess->conn_lock); + return conn; + } + } + spin_unlock_bh(&sess->conn_lock); + + return NULL; +} + +void iscsit_check_conn_usage_count(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->conn_usage_lock); + if (conn->conn_usage_count != 0) { + conn->conn_waiting_on_uc = 1; + spin_unlock_bh(&conn->conn_usage_lock); + + wait_for_completion(&conn->conn_waiting_on_uc_comp); + return; + } + spin_unlock_bh(&conn->conn_usage_lock); +} + +void iscsit_dec_conn_usage_count(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->conn_usage_lock); + conn->conn_usage_count--; + + if (!conn->conn_usage_count && conn->conn_waiting_on_uc) + complete(&conn->conn_waiting_on_uc_comp); + + spin_unlock_bh(&conn->conn_usage_lock); +} + +void iscsit_inc_conn_usage_count(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->conn_usage_lock); + conn->conn_usage_count++; + spin_unlock_bh(&conn->conn_usage_lock); +} + +static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response) +{ + u8 state; + struct iscsit_cmd *cmd; + + cmd = iscsit_allocate_cmd(conn, TASK_RUNNING); + if (!cmd) + return -1; + + cmd->iscsi_opcode = ISCSI_OP_NOOP_IN; + state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE : + ISTATE_SEND_NOPIN_NO_RESPONSE; + cmd->init_task_tag = RESERVED_ITT; + cmd->targ_xfer_tag = (want_response) ? 
+ session_get_next_ttt(conn->sess) : 0xFFFFFFFF; + spin_lock_bh(&conn->cmd_lock); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); + spin_unlock_bh(&conn->cmd_lock); + + if (want_response) + iscsit_start_nopin_response_timer(conn); + iscsit_add_cmd_to_immediate_queue(cmd, conn, state); + + return 0; +} + +void iscsit_handle_nopin_response_timeout(struct timer_list *t) +{ + struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer); + struct iscsit_session *sess = conn->sess; + + iscsit_inc_conn_usage_count(conn); + + spin_lock_bh(&conn->nopin_timer_lock); + if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) { + spin_unlock_bh(&conn->nopin_timer_lock); + iscsit_dec_conn_usage_count(conn); + return; + } + + pr_err("Did not receive response to NOPIN on CID: %hu, failing" + " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n", + conn->cid, sess->sess_ops->InitiatorName, sess->isid, + sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt); + conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING; + spin_unlock_bh(&conn->nopin_timer_lock); + + iscsit_fill_cxn_timeout_err_stats(sess); + iscsit_cause_connection_reinstatement(conn, 0); + iscsit_dec_conn_usage_count(conn); +} + +void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + spin_lock_bh(&conn->nopin_timer_lock); + if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) { + spin_unlock_bh(&conn->nopin_timer_lock); + return; + } + + mod_timer(&conn->nopin_response_timer, + (get_jiffies_64() + na->nopin_response_timeout * HZ)); + spin_unlock_bh(&conn->nopin_timer_lock); +} + +void iscsit_start_nopin_response_timer(struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + spin_lock_bh(&conn->nopin_timer_lock); + if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) { + spin_unlock_bh(&conn->nopin_timer_lock); + return; + } + + conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP; + conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING; + mod_timer(&conn->nopin_response_timer, + jiffies + na->nopin_response_timeout * HZ); + + pr_debug("Started NOPIN Response Timer on CID: %d to %u" + " seconds\n", conn->cid, na->nopin_response_timeout); + spin_unlock_bh(&conn->nopin_timer_lock); +} + +void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->nopin_timer_lock); + if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) { + spin_unlock_bh(&conn->nopin_timer_lock); + return; + } + conn->nopin_response_timer_flags |= ISCSI_TF_STOP; + spin_unlock_bh(&conn->nopin_timer_lock); + + del_timer_sync(&conn->nopin_response_timer); + + spin_lock_bh(&conn->nopin_timer_lock); + conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING; + spin_unlock_bh(&conn->nopin_timer_lock); +} + +void iscsit_handle_nopin_timeout(struct timer_list *t) +{ + struct iscsit_conn *conn = from_timer(conn, t, nopin_timer); + + iscsit_inc_conn_usage_count(conn); + + spin_lock_bh(&conn->nopin_timer_lock); + if (conn->nopin_timer_flags & ISCSI_TF_STOP) { + spin_unlock_bh(&conn->nopin_timer_lock); + iscsit_dec_conn_usage_count(conn); + return; + } + conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING; + spin_unlock_bh(&conn->nopin_timer_lock); + + iscsit_add_nopin(conn, 1); + iscsit_dec_conn_usage_count(conn); +} + +void __iscsit_start_nopin_timer(struct iscsit_conn *conn) +{ + struct iscsit_session *sess = conn->sess; + struct 
iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); + + lockdep_assert_held(&conn->nopin_timer_lock); + + /* + * NOPIN timeout is disabled. + */ + if (!na->nopin_timeout) + return; + + if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) + return; + + conn->nopin_timer_flags &= ~ISCSI_TF_STOP; + conn->nopin_timer_flags |= ISCSI_TF_RUNNING; + mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ); + + pr_debug("Started NOPIN Timer on CID: %d at %u second" + " interval\n", conn->cid, na->nopin_timeout); +} + +void iscsit_start_nopin_timer(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->nopin_timer_lock); + __iscsit_start_nopin_timer(conn); + spin_unlock_bh(&conn->nopin_timer_lock); +} + +void iscsit_stop_nopin_timer(struct iscsit_conn *conn) +{ + spin_lock_bh(&conn->nopin_timer_lock); + if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) { + spin_unlock_bh(&conn->nopin_timer_lock); + return; + } + conn->nopin_timer_flags |= ISCSI_TF_STOP; + spin_unlock_bh(&conn->nopin_timer_lock); + + del_timer_sync(&conn->nopin_timer); + + spin_lock_bh(&conn->nopin_timer_lock); + conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING; + spin_unlock_bh(&conn->nopin_timer_lock); +} + +void iscsit_login_timeout(struct timer_list *t) +{ + struct iscsit_conn *conn = from_timer(conn, t, login_timer); + struct iscsi_login *login = conn->login; + + pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); + + spin_lock_bh(&conn->login_timer_lock); + login->login_failed = 1; + + if (conn->login_kworker) { + pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", + conn->login_kworker->comm, conn->login_kworker->pid); + send_sig(SIGINT, conn->login_kworker, 1); + } else { + schedule_delayed_work(&conn->login_work, 0); + } + spin_unlock_bh(&conn->login_timer_lock); +} + +void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr) +{ + pr_debug("Login timer started\n"); + + conn->login_kworker = kthr; + mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); +} + +int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr) +{ + struct iscsi_login *login = conn->login; + int ret = 0; + + spin_lock_bh(&conn->login_timer_lock); + if (login->login_failed) { + /* The timer has already expired */ + ret = -1; + } else { + conn->login_kworker = kthr; + } + spin_unlock_bh(&conn->login_timer_lock); + + return ret; +} + +void iscsit_stop_login_timer(struct iscsit_conn *conn) +{ + pr_debug("Login timer stopped\n"); + timer_delete_sync(&conn->login_timer); +} + +int iscsit_send_tx_data( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn, + int use_misc) +{ + int tx_sent, tx_size; + u32 iov_count; + struct kvec *iov; + +send_data: + tx_size = cmd->tx_size; + + if (!use_misc) { + iov = &cmd->iov_data[0]; + iov_count = cmd->iov_data_count; + } else { + iov = &cmd->iov_misc[0]; + iov_count = cmd->iov_misc_count; + } + + tx_sent = tx_data(conn, &iov[0], iov_count, tx_size); + if (tx_size != tx_sent) { + if (tx_sent == -EAGAIN) { + pr_err("tx_data() returned -EAGAIN\n"); + goto send_data; + } else + return -1; + } + cmd->tx_size = 0; + + return 0; +} + +int iscsit_fe_sendpage_sg( + struct iscsit_cmd *cmd, + struct iscsit_conn *conn) +{ + struct scatterlist *sg = cmd->first_data_sg; + struct bio_vec bvec; + struct msghdr msghdr = { .msg_flags = MSG_SPLICE_PAGES, }; + struct kvec iov; + u32 tx_hdr_size, data_len; + u32 offset = cmd->first_data_sg_off; + int tx_sent, iov_off; + +send_hdr: + tx_hdr_size = ISCSI_HDR_LEN; + if (conn->conn_ops->HeaderDigest) + 
tx_hdr_size += ISCSI_CRC_LEN; + + iov.iov_base = cmd->pdu; + iov.iov_len = tx_hdr_size; + + tx_sent = tx_data(conn, &iov, 1, tx_hdr_size); + if (tx_hdr_size != tx_sent) { + if (tx_sent == -EAGAIN) { + pr_err("tx_data() returned -EAGAIN\n"); + goto send_hdr; + } + return -1; + } + + data_len = cmd->tx_size - tx_hdr_size - cmd->padding; + /* + * Set iov_off used by padding and data digest tx_data() calls below + * in order to determine proper offset into cmd->iov_data[] + */ + if (conn->conn_ops->DataDigest) { + data_len -= ISCSI_CRC_LEN; + if (cmd->padding) + iov_off = (cmd->iov_data_count - 2); + else + iov_off = (cmd->iov_data_count - 1); + } else { + iov_off = (cmd->iov_data_count - 1); + } + /* + * Perform a sendmsg() call with MSG_SPLICE_PAGES for each page in the scatterlist + */ + while (data_len) { + u32 space = (sg->length - offset); + u32 sub_len = min_t(u32, data_len, space); +send_pg: + bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset); + iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len); + + tx_sent = conn->sock->ops->sendmsg(conn->sock, &msghdr, + sub_len); + if (tx_sent != sub_len) { + if (tx_sent == -EAGAIN) { + pr_err("sendmsg/splice returned -EAGAIN\n"); + goto send_pg; + } + + pr_err("sendmsg/splice failure: %d\n", tx_sent); + return -1; + } + + data_len -= sub_len; + offset = 0; + sg = sg_next(sg); + } + +send_padding: + if (cmd->padding) { + struct kvec *iov_p = &cmd->iov_data[iov_off++]; + + tx_sent = tx_data(conn, iov_p, 1, cmd->padding); + if (cmd->padding != tx_sent) { + if (tx_sent == -EAGAIN) { + pr_err("tx_data() returned -EAGAIN\n"); + goto send_padding; + } + return -1; + } + } + +send_datacrc: + if (conn->conn_ops->DataDigest) { + struct kvec *iov_d = &cmd->iov_data[iov_off]; + + tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); + if (ISCSI_CRC_LEN != tx_sent) { + if (tx_sent == -EAGAIN) { + pr_err("tx_data() returned -EAGAIN\n"); + goto send_datacrc; + } + return -1; + } + } + + return 0; +} + +/* + * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU + * back to the Initiator when an exception condition occurs with the + * errors set in status_class and status_detail. + * + * Parameters: iSCSI Connection, Status Class, Status Detail. + * Returns: 0 on success, -1 on error.
+ */ +int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail) +{ + struct iscsi_login_rsp *hdr; + struct iscsi_login *login = conn->conn_login; + + login->login_failed = 1; + iscsit_collect_login_stats(conn, status_class, status_detail); + + memset(&login->rsp[0], 0, ISCSI_HDR_LEN); + + hdr = (struct iscsi_login_rsp *)&login->rsp[0]; + hdr->opcode = ISCSI_OP_LOGIN_RSP; + hdr->status_class = status_class; + hdr->status_detail = status_detail; + hdr->itt = conn->login_itt; + + return conn->conn_transport->iscsit_put_login_tx(conn, login, 0); +} + +void iscsit_print_session_params(struct iscsit_session *sess) +{ + struct iscsit_conn *conn; + + pr_debug("-----------------------------[Session Params for" + " SID: %u]-----------------------------\n", sess->sid); + spin_lock_bh(&sess->conn_lock); + list_for_each_entry(conn, &sess->sess_conn_list, conn_list) + iscsi_dump_conn_ops(conn->conn_ops); + spin_unlock_bh(&sess->conn_lock); + + iscsi_dump_sess_ops(sess->sess_ops); +} + +int rx_data( + struct iscsit_conn *conn, + struct kvec *iov, + int iov_count, + int data) +{ + int rx_loop = 0, total_rx = 0; + struct msghdr msg; + + if (!conn || !conn->sock || !conn->conn_ops) + return -1; + + memset(&msg, 0, sizeof(struct msghdr)); + iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data); + + while (msg_data_left(&msg)) { + rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL); + if (rx_loop <= 0) { + pr_debug("rx_loop: %d total_rx: %d\n", + rx_loop, total_rx); + return rx_loop; + } + total_rx += rx_loop; + pr_debug("rx_loop: %d, total_rx: %d, data: %d\n", + rx_loop, total_rx, data); + } + + return total_rx; +} + +int tx_data( + struct iscsit_conn *conn, + struct kvec *iov, + int iov_count, + int data) +{ + struct msghdr msg; + int total_tx = 0; + + if (!conn || !conn->sock || !conn->conn_ops) + return -1; + + if (data <= 0) { + pr_err("Data length is: %d\n", data); + return -1; + } + + memset(&msg, 0, sizeof(struct msghdr)); + + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data); + + while (msg_data_left(&msg)) { + int tx_loop = sock_sendmsg(conn->sock, &msg); + if (tx_loop <= 0) { + pr_debug("tx_loop: %d total_tx %d\n", + tx_loop, total_tx); + return tx_loop; + } + total_tx += tx_loop; + pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", + tx_loop, total_tx, data); + } + + return total_tx; +} + +void iscsit_collect_login_stats( + struct iscsit_conn *conn, + u8 status_class, + u8 status_detail) +{ + struct iscsi_param *intrname = NULL; + struct iscsi_tiqn *tiqn; + struct iscsi_login_stats *ls; + + tiqn = iscsit_snmp_get_tiqn(conn); + if (!tiqn) + return; + + ls = &tiqn->login_stats; + + spin_lock(&ls->lock); + if (status_class == ISCSI_STATUS_CLS_SUCCESS) + ls->accepts++; + else if (status_class == ISCSI_STATUS_CLS_REDIRECT) { + ls->redirects++; + ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT; + } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && + (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) { + ls->authenticate_fails++; + ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE; + } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && + (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) { + ls->authorize_fails++; + ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE; + } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && + (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) { + ls->negotiate_fails++; + ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE; + } else { + ls->other_fails++; + ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER; + 
} + + /* Save initiator name, ip address and time, if it is a failed login */ + if (status_class != ISCSI_STATUS_CLS_SUCCESS) { + if (conn->param_list) + intrname = iscsi_find_param_from_key(INITIATORNAME, + conn->param_list); + strscpy(ls->last_intr_fail_name, + (intrname ? intrname->value : "Unknown"), + sizeof(ls->last_intr_fail_name)); + + ls->last_intr_fail_ip_family = conn->login_family; + + ls->last_intr_fail_sockaddr = conn->login_sockaddr; + ls->last_fail_time = get_jiffies_64(); + } + + spin_unlock(&ls->lock); +} + +struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn) +{ + struct iscsi_portal_group *tpg; + + if (!conn) + return NULL; + + tpg = conn->tpg; + if (!tpg) + return NULL; + + if (!tpg->tpg_tiqn) + return NULL; + + return tpg->tpg_tiqn; +} + +void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess) +{ + struct iscsi_portal_group *tpg = sess->tpg; + struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; + + if (!tiqn) + return; + + spin_lock_bh(&tiqn->sess_err_stats.lock); + strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name, + sess->sess_ops->InitiatorName, + sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name)); + tiqn->sess_err_stats.last_sess_failure_type = + ISCSI_SESS_ERR_CXN_TIMEOUT; + tiqn->sess_err_stats.cxn_timeout_errors++; + atomic_long_inc(&sess->conn_timeout_errors); + spin_unlock_bh(&tiqn->sess_err_stats.lock); +} diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h new file mode 100644 index 0000000000..24b8e57757 --- /dev/null +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ISCSI_TARGET_UTIL_H +#define ISCSI_TARGET_UTIL_H + +#include <linux/types.h> +#include <scsi/iscsi_proto.h> /* itt_t */ + +#define MARKER_SIZE 8 + +struct iscsit_cmd; +struct iscsit_conn; +struct iscsi_conn_recovery; +struct iscsit_session; + +extern int iscsit_add_r2t_to_list(struct iscsit_cmd *, u32, u32, int, u32); +extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32); +extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *); +extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *); +extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *); +extern struct iscsit_cmd *iscsit_alloc_cmd(struct iscsit_conn *, gfp_t); +extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int); +extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32); +extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *); +extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsit_cmd *, u32); +extern int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, + unsigned char * ,__be32 cmdsn); +extern int iscsit_check_unsolicited_dataout(struct iscsit_cmd *, unsigned char *); +extern struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *, + itt_t, u32); +extern struct iscsit_cmd *iscsit_find_cmd_from_ttt(struct iscsit_conn *, u32); +extern int iscsit_find_cmd_for_recovery(struct iscsit_session *, struct iscsit_cmd **, + struct iscsi_conn_recovery **, itt_t); +extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit_conn *, u8); +extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *); +extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8); +extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *); +extern void 
iscsit_remove_cmd_from_tx_queues(struct iscsit_cmd *, struct iscsit_conn *); +extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *); +extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *); +extern void iscsit_release_cmd(struct iscsit_cmd *); +extern void __iscsit_free_cmd(struct iscsit_cmd *, bool); +extern void iscsit_free_cmd(struct iscsit_cmd *, bool); +extern bool iscsit_check_session_usage_count(struct iscsit_session *sess, bool can_sleep); +extern void iscsit_dec_session_usage_count(struct iscsit_session *); +extern void iscsit_inc_session_usage_count(struct iscsit_session *); +extern struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *, u16); +extern struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *, u16); +extern void iscsit_check_conn_usage_count(struct iscsit_conn *); +extern void iscsit_dec_conn_usage_count(struct iscsit_conn *); +extern void iscsit_inc_conn_usage_count(struct iscsit_conn *); +extern void iscsit_handle_nopin_response_timeout(struct timer_list *t); +extern void iscsit_mod_nopin_response_timer(struct iscsit_conn *); +extern void iscsit_start_nopin_response_timer(struct iscsit_conn *); +extern void iscsit_stop_nopin_response_timer(struct iscsit_conn *); +extern void iscsit_handle_nopin_timeout(struct timer_list *t); +extern void __iscsit_start_nopin_timer(struct iscsit_conn *); +extern void iscsit_start_nopin_timer(struct iscsit_conn *); +extern void iscsit_stop_nopin_timer(struct iscsit_conn *); +extern void iscsit_login_timeout(struct timer_list *t); +extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr); +extern void iscsit_stop_login_timer(struct iscsit_conn *); +extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr); +extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int); +extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *); +extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8); +extern void iscsit_print_session_params(struct iscsit_session *); +extern int iscsit_print_dev_to_proc(char *, char **, off_t, int); +extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int); +extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int); +extern int rx_data(struct iscsit_conn *, struct kvec *, int, int); +extern int tx_data(struct iscsit_conn *, struct kvec *, int, int); +extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8); +extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *); +extern void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *); + +#endif /*** ISCSI_TARGET_UTIL_H ***/ diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig new file mode 100644 index 0000000000..f40f316073 --- /dev/null +++ b/drivers/target/loopback/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config LOOPBACK_TARGET + tristate "TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module" + depends on SCSI + help + Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD + fabric loopback module.
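The rx_data() and tx_data() helpers declared in iscsi_target_util.h above share one contract: the caller's kvec array is wrapped in an iov_iter-backed msghdr, and sock_recvmsg()/sock_sendmsg() are called in a loop until msg_data_left() reaches zero, so a caller always sees either the full byte count or an error, never a silent short transfer. The userspace C sketch below mirrors that idiom with writev(); send_all() and its iovec bookkeeping are invented for illustration and are not part of the driver.

/*
 * Userspace analogue of the tx_data() "full transfer or error" loop.
 * Illustrative only: send_all() is not part of the iSCSI target code.
 */
#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

/* Returns 0 once every byte described by iov[] is written, -1 on error. */
static int send_all(int fd, struct iovec *iov, int iovcnt)
{
	while (iovcnt > 0) {
		ssize_t n = writev(fd, iov, iovcnt);

		if (n < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;	/* same retry the driver does on -EAGAIN */
			return -1;
		}
		/*
		 * Advance past the n bytes just written -- the job that
		 * iov_iter/msg_data_left() does for the kernel code.
		 */
		while (n > 0) {
			size_t step = (size_t)n < iov->iov_len ?
					(size_t)n : iov->iov_len;

			iov->iov_base = (char *)iov->iov_base + step;
			iov->iov_len -= step;
			n -= (ssize_t)step;
			if (iov->iov_len == 0) {
				iov++;
				iovcnt--;
			}
		}
		/* Skip any zero-length entries left at the front. */
		while (iovcnt > 0 && iov->iov_len == 0) {
			iov++;
			iovcnt--;
		}
	}
	return 0;
}

int main(void)
{
	struct iovec v[2] = {
		{ .iov_base = "hello, ", .iov_len = 7 },
		{ .iov_base = "world\n", .iov_len = 6 },
	};

	return send_all(STDOUT_FILENO, v, 2) ? 1 : 0;
}

The manual iovec advance is the part the kernel gets for free from iov_iter; everything else is the same retry-until-drained loop, and whether to spin on EAGAIN (as the kernel code does via its goto labels) or wait for POLLOUT first is a policy choice outside the sketch.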
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile new file mode 100644 index 0000000000..336bd44bf9 --- /dev/null +++ b/drivers/target/loopback/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c new file mode 100644 index 0000000000..4ec99a55ac --- /dev/null +++ b/drivers/target/loopback/tcm_loop.c @@ -0,0 +1,1149 @@ +/******************************************************************************* + * + * This file contains the Linux/SCSI LLD virtual SCSI initiator driver + * for emulated SAS initiator ports + * + * © Copyright 2011-2013 Datera, Inc. + * + * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * + * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + ****************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/configfs.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_loop.h" + +#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) + +static struct kmem_cache *tcm_loop_cmd_cache; + +static int tcm_loop_hba_no_cnt; + +static int tcm_loop_queue_status(struct se_cmd *se_cmd); + +static unsigned int tcm_loop_nr_hw_queues = 1; +module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644); + +static unsigned int tcm_loop_can_queue = 1024; +module_param_named(can_queue, tcm_loop_can_queue, uint, 0644); + +static unsigned int tcm_loop_cmd_per_lun = 1024; +module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644); + +/* + * Called from struct target_core_fabric_ops->check_stop_free() + */ +static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) +{ + return transport_generic_free_cmd(se_cmd, 0); +} + +static void tcm_loop_release_cmd(struct se_cmd *se_cmd) +{ + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + struct scsi_cmnd *sc = tl_cmd->sc; + + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); + else + scsi_done(sc); +} + +static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + seq_puts(m, "tcm_loop_proc_info()\n"); + return 0; +} + +static int tcm_loop_driver_probe(struct device *); +static void tcm_loop_driver_remove(struct device *); + +static struct bus_type tcm_loop_lld_bus = { + .name = "tcm_loop_bus", + .probe = tcm_loop_driver_probe, + .remove = tcm_loop_driver_remove, +}; + +static struct device_driver tcm_loop_driverfs = { + .name = "tcm_loop", + .bus = &tcm_loop_lld_bus, +}; +/* + * Used with root_device_register() in tcm_loop_alloc_core_bus() below + */ 
+static struct device *tcm_loop_primary; + +static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd) +{ + struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd; + struct scsi_cmnd *sc = tl_cmd->sc; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + struct scatterlist *sgl_bidi = NULL; + u32 sgl_bidi_count = 0, transfer_length; + + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + + /* + * Ensure that this tl_tpg reference from the incoming sc->device->id + * has already been configured via tcm_loop_make_naa_tpg(). + */ + if (!tl_tpg->tl_hba) { + set_host_byte(sc, DID_NO_CONNECT); + goto out_done; + } + if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) { + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + goto out_done; + } + tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) { + scmd_printk(KERN_ERR, sc, + "TCM_Loop I_T Nexus does not exist\n"); + set_host_byte(sc, DID_ERROR); + goto out_done; + } + + transfer_length = scsi_transfer_length(sc); + if (!scsi_prot_sg_count(sc) && + scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) { + se_cmd->prot_pto = true; + /* + * loopback transport doesn't support + * WRITE_GENERATE, READ_STRIP protection + * information operations, go ahead unprotected. + */ + transfer_length = scsi_bufflen(sc); + } + + se_cmd->tag = tl_cmd->sc_cmd_tag; + target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0], + tl_cmd->sc->device->lun, transfer_length, + TCM_SIMPLE_TAG, sc->sc_data_direction, 0); + + if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc), + scsi_sg_count(sc), sgl_bidi, sgl_bidi_count, + scsi_prot_sglist(sc), scsi_prot_sg_count(sc), + GFP_ATOMIC)) + return; + + target_queue_submission(se_cmd); + return; + +out_done: + scsi_done(sc); +} + +/* + * ->queuecommand can be and usually is called from interrupt context, so + * defer the actual submission to a workqueue. 
+ */ +static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) +{ + struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc); + + pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n", + __func__, sc->device->host->host_no, sc->device->id, + sc->device->channel, sc->device->lun, sc->cmnd[0], + scsi_bufflen(sc)); + + memset(tl_cmd, 0, sizeof(*tl_cmd)); + tl_cmd->sc = sc; + tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag; + + tcm_loop_target_queue_cmd(tl_cmd); + return 0; +} + +/* + * Called from SCSI EH process context to issue a LUN_RESET TMR + * to struct scsi_device + */ +static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, + u64 lun, int task, enum tcm_tmreq_table tmr) +{ + struct se_cmd *se_cmd; + struct se_session *se_sess; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_cmd *tl_cmd; + int ret = TMR_FUNCTION_FAILED, rc; + + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without active I_T Nexus\n"); + return ret; + } + + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); + if (!tl_cmd) + return ret; + + init_completion(&tl_cmd->tmr_done); + + se_cmd = &tl_cmd->tl_se_cmd; + se_sess = tl_tpg->tl_nexus->se_sess; + + rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun, + NULL, tmr, GFP_KERNEL, task, + TARGET_SCF_ACK_KREF); + if (rc < 0) + goto release; + wait_for_completion(&tl_cmd->tmr_done); + ret = se_cmd->se_tmr_req->response; + target_put_sess_cmd(se_cmd); + +out: + return ret; + +release: + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); + goto out; +} + +static int tcm_loop_abort_task(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + int ret; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, + scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK); + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; +} + +/* + * Called from SCSI EH process context to issue a LUN_RESET TMR + * to struct scsi_device + */ +static int tcm_loop_device_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + int ret; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, + 0, TMR_LUN_RESET); + return (ret == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; +} + +static int tcm_loop_target_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + if (!tl_hba) { + pr_err("Unable to perform device reset without active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + if (tl_tpg) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return SUCCESS; + } + return FAILED; +} + +static const struct scsi_host_template tcm_loop_driver_template = { + .show_info = tcm_loop_show_info, + .proc_name = "tcm_loopback", + .name = "TCM_Loopback", + .queuecommand = tcm_loop_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .eh_abort_handler = tcm_loop_abort_task, + .eh_device_reset_handler = tcm_loop_device_reset, + .eh_target_reset_handler = tcm_loop_target_reset, + .this_id = -1, + .sg_tablesize = 256, + .max_sectors = 0xFFFF, + .dma_boundary = PAGE_SIZE - 1, + .module = THIS_MODULE, + .track_queue_depth = 1, + .cmd_size = sizeof(struct tcm_loop_cmd), +}; + +static int tcm_loop_driver_probe(struct device *dev) +{ + struct tcm_loop_hba *tl_hba; + struct Scsi_Host *sh; + int error, host_prot; + + tl_hba = to_tcm_loop_hba(dev); + + sh = scsi_host_alloc(&tcm_loop_driver_template, + sizeof(struct tcm_loop_hba)); + if (!sh) { + pr_err("Unable to allocate struct scsi_host\n"); + return -ENODEV; + } + tl_hba->sh = sh; + + /* + * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata + */ + *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; + /* + * Setup single ID, Channel and LUN for now.. + */ + sh->max_id = 2; + sh->max_lun = 0; + sh->max_channel = 0; + sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + sh->nr_hw_queues = tcm_loop_nr_hw_queues; + sh->can_queue = tcm_loop_can_queue; + sh->cmd_per_lun = tcm_loop_cmd_per_lun; + + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(sh, host_prot); + scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); + + error = scsi_add_host(sh, &tl_hba->dev); + if (error) { + pr_err("%s: scsi_add_host failed\n", __func__); + scsi_host_put(sh); + return -ENODEV; + } + return 0; +} + +static void tcm_loop_driver_remove(struct device *dev) +{ + struct tcm_loop_hba *tl_hba; + struct Scsi_Host *sh; + + tl_hba = to_tcm_loop_hba(dev); + sh = tl_hba->sh; + + scsi_remove_host(sh); + scsi_host_put(sh); +} + +static void tcm_loop_release_adapter(struct device *dev) +{ + struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); + + kfree(tl_hba); +} + +/* + * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c + */ +static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) +{ + int ret; + + tl_hba->dev.bus = &tcm_loop_lld_bus; + tl_hba->dev.parent = tcm_loop_primary; + tl_hba->dev.release = &tcm_loop_release_adapter; + dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); + + ret = device_register(&tl_hba->dev); + if (ret) { + pr_err("device_register() failed for tl_hba->dev: %d\n", ret); + put_device(&tl_hba->dev); + return -ENODEV; + } + + return 0; +} + +/* + * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated + * tcm_loop SCSI bus. 
+ */ +static int tcm_loop_alloc_core_bus(void) +{ + int ret; + + tcm_loop_primary = root_device_register("tcm_loop_0"); + if (IS_ERR(tcm_loop_primary)) { + pr_err("Unable to allocate tcm_loop_primary\n"); + return PTR_ERR(tcm_loop_primary); + } + + ret = bus_register(&tcm_loop_lld_bus); + if (ret) { + pr_err("bus_register() failed for tcm_loop_lld_bus\n"); + goto dev_unreg; + } + + ret = driver_register(&tcm_loop_driverfs); + if (ret) { + pr_err("driver_register() failed for tcm_loop_driverfs\n"); + goto bus_unreg; + } + + pr_debug("Initialized TCM Loop Core Bus\n"); + return ret; + +bus_unreg: + bus_unregister(&tcm_loop_lld_bus); +dev_unreg: + root_device_unregister(tcm_loop_primary); + return ret; +} + +static void tcm_loop_release_core_bus(void) +{ + driver_unregister(&tcm_loop_driverfs); + bus_unregister(&tcm_loop_lld_bus); + root_device_unregister(tcm_loop_primary); + + pr_debug("Releasing TCM Loop Core BUS\n"); +} + +static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg) +{ + return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); +} + +static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) +{ + /* + * Return the passed NAA identifier for the Target Port + */ + return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0]; +} + +static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) +{ + /* + * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 + * to represent the SCSI Target Port. + */ + return tl_tpg(se_tpg)->tl_tpgt; +} + +/* + * Returning (1) here allows for target_core_mod struct se_node_acl to be generated + * based upon the incoming fabric dependent SCSI Initiator Port + */ +static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, + tl_se_tpg); + return tl_tpg->tl_fabric_prot_type; +} + +static u32 tcm_loop_sess_get_index(struct se_session *se_sess) +{ + return 1; +} + +static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) +{ + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + + return tl_cmd->sc_cmd_state; +} + +static int tcm_loop_write_pending(struct se_cmd *se_cmd) +{ + /* + * Since Linux/SCSI has already sent down a struct scsi_cmnd + * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array + * memory, and memory has already been mapped to struct se_cmd->t_mem_list + * format with transport_generic_map_mem_to_cmd(). + * + * We now tell TCM to add this WRITE CDB directly into the TCM storage + * object execution queue. 
+ */ + target_execute_cmd(se_cmd); + return 0; +} + +static int tcm_loop_queue_data_or_status(const char *func, + struct se_cmd *se_cmd, u8 scsi_status) +{ + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + struct scsi_cmnd *sc = tl_cmd->sc; + + pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", + func, sc, sc->cmnd[0]); + + if (se_cmd->sense_buffer && + ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || + (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { + + memcpy(sc->sense_buffer, se_cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + sc->result = SAM_STAT_CHECK_CONDITION; + } else + sc->result = scsi_status; + + set_host_byte(sc, DID_OK); + if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || + (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) + scsi_set_resid(sc, se_cmd->residual_count); + return 0; +} + +static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) +{ + return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD); +} + +static int tcm_loop_queue_status(struct se_cmd *se_cmd) +{ + return tcm_loop_queue_data_or_status(__func__, + se_cmd, se_cmd->scsi_status); +} + +static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) +{ + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + + /* Wake up tcm_loop_issue_tmr(). */ + complete(&tl_cmd->tmr_done); +} + +static void tcm_loop_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + +static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) +{ + switch (tl_hba->tl_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +/* Start items for tcm_loop_port_cit */ + +static int tcm_loop_port_link( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; + + atomic_inc_mb(&tl_tpg->tl_tpg_port_count); + /* + * Add Linux/SCSI struct scsi_device by HCTL + */ + scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); + + pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n"); + return 0; +} + +static void tcm_loop_port_unlink( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct scsi_device *sd; + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + + tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); + tl_hba = tl_tpg->tl_hba; + + sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, + se_lun->unpacked_lun); + if (!sd) { + pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n", + 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); + return; + } + /* + * Remove Linux/SCSI struct scsi_device by HCTL + */ + scsi_remove_device(sd); + scsi_device_put(sd); + + atomic_dec_mb(&tl_tpg->tl_tpg_port_count); + + pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); +} + +/* End items for tcm_loop_port_cit */ + +static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show( + struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, + tl_se_tpg); + + return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type); +} + +static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store( + struct config_item *item, const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 
struct tcm_loop_tpg, + tl_se_tpg); + unsigned long val; + int ret = kstrtoul(page, 0, &val); + + if (ret) { + pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); + return ret; + } + if (val != 0 && val != 1 && val != 3) { + pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); + return -EINVAL; + } + tl_tpg->tl_fabric_prot_type = val; + + return count; +} + +CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type); + +static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = { + &tcm_loop_tpg_attrib_attr_fabric_prot_type, + NULL, +}; + +/* Start items for tcm_loop_nexus_cit */ + +static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg, + struct se_session *se_sess, void *p) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + + tl_tpg->tl_nexus = p; + return 0; +} + +static int tcm_loop_make_nexus( + struct tcm_loop_tpg *tl_tpg, + const char *name) +{ + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; + struct tcm_loop_nexus *tl_nexus; + int ret; + + if (tl_tpg->tl_nexus) { + pr_debug("tl_tpg->tl_nexus already exists\n"); + return -EEXIST; + } + + tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL); + if (!tl_nexus) + return -ENOMEM; + + tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0, + TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, + name, tl_nexus, tcm_loop_alloc_sess_cb); + if (IS_ERR(tl_nexus->se_sess)) { + ret = PTR_ERR(tl_nexus->se_sess); + kfree(tl_nexus); + return ret; + } + + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n", + tcm_loop_dump_proto_id(tl_hba), name); + return 0; +} + +static int tcm_loop_drop_nexus( + struct tcm_loop_tpg *tpg) +{ + struct se_session *se_sess; + struct tcm_loop_nexus *tl_nexus; + + tl_nexus = tpg->tl_nexus; + if (!tl_nexus) + return -ENODEV; + + se_sess = tl_nexus->se_sess; + if (!se_sess) + return -ENODEV; + + if (atomic_read(&tpg->tl_tpg_port_count)) { + pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n", + atomic_read(&tpg->tl_tpg_port_count)); + return -EPERM; + } + + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n", + tcm_loop_dump_proto_id(tpg->tl_hba), + tl_nexus->se_sess->se_node_acl->initiatorname); + /* + * Release the SCSI I_T Nexus to the emulated Target Port + */ + target_remove_session(se_sess); + tpg->tl_nexus = NULL; + kfree(tl_nexus); + return 0; +} + +/* End items for tcm_loop_nexus_cit */ + +static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + struct tcm_loop_nexus *tl_nexus; + ssize_t ret; + + tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) + return -ENODEV; + + ret = snprintf(page, PAGE_SIZE, "%s\n", + tl_nexus->se_sess->se_node_acl->initiatorname); + + return ret; +} + +static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; + unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr; + int ret; + /* + * Shutdown the active I_T nexus if 'NULL' is passed.. + */ + if (!strncmp(page, "NULL", 4)) { + ret = tcm_loop_drop_nexus(tl_tpg); + return (!ret) ? 
count : ret; + } + /* + * Otherwise make sure the passed virtual Initiator port WWN matches + * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call + * tcm_loop_make_nexus() + */ + if (strlen(page) >= TL_WWN_ADDR_LEN) { + pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n", + page, TL_WWN_ADDR_LEN); + return -EINVAL; + } + snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); + + ptr = strstr(i_port, "naa."); + if (ptr) { + if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { + pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n", + i_port, tcm_loop_dump_proto_id(tl_hba)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + ptr = strstr(i_port, "fc."); + if (ptr) { + if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { + pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n", + i_port, tcm_loop_dump_proto_id(tl_hba)); + return -EINVAL; + } + port_ptr = &i_port[3]; /* Skip over "fc." */ + goto check_newline; + } + ptr = strstr(i_port, "iqn."); + if (ptr) { + if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { + pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n", + i_port, tcm_loop_dump_proto_id(tl_hba)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + pr_err("Unable to locate prefix for emulated Initiator Port: %s\n", + i_port); + return -EINVAL; + /* + * Clear any trailing newline for the NAA WWN + */ +check_newline: + if (i_port[strlen(i_port)-1] == '\n') + i_port[strlen(i_port)-1] = '\0'; + + ret = tcm_loop_make_nexus(tl_tpg, port_ptr); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + const char *status = NULL; + ssize_t ret = -EINVAL; + + switch (tl_tpg->tl_transport_status) { + case TCM_TRANSPORT_ONLINE: + status = "online"; + break; + case TCM_TRANSPORT_OFFLINE: + status = "offline"; + break; + default: + break; + } + + if (status) + ret = snprintf(page, PAGE_SIZE, "%s\n", status); + + return ret; +} + +static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + + if (!strncmp(page, "online", 6)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return count; + } + if (!strncmp(page, "offline", 7)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE; + if (tl_tpg->tl_nexus) { + struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess; + + core_allocate_nexus_loss_ua(tl_sess->se_node_acl); + } + return count; + } + return -EINVAL; +} + +static ssize_t tcm_loop_tpg_address_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; + + return snprintf(page, PAGE_SIZE, "%d:0:%d\n", + tl_hba->sh->host_no, tl_tpg->tl_tpgt); +} + +CONFIGFS_ATTR(tcm_loop_tpg_, nexus); +CONFIGFS_ATTR(tcm_loop_tpg_, transport_status); +CONFIGFS_ATTR_RO(tcm_loop_tpg_, address); + +static struct configfs_attribute *tcm_loop_tpg_attrs[] = { + &tcm_loop_tpg_attr_nexus, + &tcm_loop_tpg_attr_transport_status, + &tcm_loop_tpg_attr_address, + NULL, +}; + +/* Start items for 
tcm_loop_naa_cit */ + +static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn, + const char *name) +{ + struct tcm_loop_hba *tl_hba = container_of(wwn, + struct tcm_loop_hba, tl_hba_wwn); + struct tcm_loop_tpg *tl_tpg; + int ret; + unsigned long tpgt; + + if (strstr(name, "tpgt_") != name) { + pr_err("Unable to locate \"tpgt_#\" directory group\n"); + return ERR_PTR(-EINVAL); + } + if (kstrtoul(name+5, 10, &tpgt)) + return ERR_PTR(-EINVAL); + + if (tpgt >= TL_TPGS_PER_HBA) { + pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n", + tpgt, TL_TPGS_PER_HBA); + return ERR_PTR(-EINVAL); + } + tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; + tl_tpg->tl_hba = tl_hba; + tl_tpg->tl_tpgt = tpgt; + /* + * Register the tl_tpg as a emulated TCM Target Endpoint + */ + ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id); + if (ret < 0) + return ERR_PTR(-ENOMEM); + + pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n", + tcm_loop_dump_proto_id(tl_hba), + config_item_name(&wwn->wwn_group.cg_item), tpgt); + return &tl_tpg->tl_se_tpg; +} + +static void tcm_loop_drop_naa_tpg( + struct se_portal_group *se_tpg) +{ + struct se_wwn *wwn = se_tpg->se_tpg_wwn; + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + struct tcm_loop_hba *tl_hba; + unsigned short tpgt; + + tl_hba = tl_tpg->tl_hba; + tpgt = tl_tpg->tl_tpgt; + /* + * Release the I_T Nexus for the Virtual target link if present + */ + tcm_loop_drop_nexus(tl_tpg); + /* + * Deregister the tl_tpg as a emulated TCM Target Endpoint + */ + core_tpg_deregister(se_tpg); + + tl_tpg->tl_hba = NULL; + tl_tpg->tl_tpgt = 0; + + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n", + tcm_loop_dump_proto_id(tl_hba), + config_item_name(&wwn->wwn_group.cg_item), tpgt); +} + +/* End items for tcm_loop_naa_cit */ + +/* Start items for tcm_loop_cit */ + +static struct se_wwn *tcm_loop_make_scsi_hba( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_loop_hba *tl_hba; + struct Scsi_Host *sh; + char *ptr; + int ret, off = 0; + + tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL); + if (!tl_hba) + return ERR_PTR(-ENOMEM); + + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." 
*/ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (!ptr) { + pr_err("Unable to locate prefix for emulated Target Port: %s\n", + name); + ret = -EINVAL; + goto out; + } + tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; + +check_len: + if (strlen(name) >= TL_WWN_ADDR_LEN) { + pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n", + name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN); + ret = -EINVAL; + goto out; + } + snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); + + /* + * Call device_register(tl_hba->dev) to register the emulated + * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after + * device_register() callbacks in tcm_loop_driver_probe() + */ + ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); + if (ret) + return ERR_PTR(ret); + + sh = tl_hba->sh; + tcm_loop_hba_no_cnt++; + pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n", + tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); + return &tl_hba->tl_hba_wwn; +out: + kfree(tl_hba); + return ERR_PTR(ret); +} + +static void tcm_loop_drop_scsi_hba( + struct se_wwn *wwn) +{ + struct tcm_loop_hba *tl_hba = container_of(wwn, + struct tcm_loop_hba, tl_hba_wwn); + + pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n", + tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, + tl_hba->sh->host_no); + /* + * Call device_unregister() on the original tl_hba->dev. + * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will + * release *tl_hba; + */ + device_unregister(&tl_hba->dev); +} + +/* Start items for tcm_loop_cit */ +static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page) +{ + return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); +} + +CONFIGFS_ATTR_RO(tcm_loop_wwn_, version); + +static struct configfs_attribute *tcm_loop_wwn_attrs[] = { + &tcm_loop_wwn_attr_version, + NULL, +}; + +/* End items for tcm_loop_cit */ + +static const struct target_core_fabric_ops loop_ops = { + .module = THIS_MODULE, + .fabric_name = "loopback", + .tpg_get_wwn = tcm_loop_get_endpoint_wwn, + .tpg_get_tag = tcm_loop_get_tag, + .tpg_check_demo_mode = tcm_loop_check_demo_mode, + .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, + .check_stop_free = tcm_loop_check_stop_free, + .release_cmd = tcm_loop_release_cmd, + .sess_get_index = tcm_loop_sess_get_index, + .write_pending = tcm_loop_write_pending, + .get_cmd_state = tcm_loop_get_cmd_state, + .queue_data_in = tcm_loop_queue_data_in, + .queue_status = tcm_loop_queue_status, + .queue_tm_rsp = tcm_loop_queue_tm_rsp, + .aborted_task = tcm_loop_aborted_task, + .fabric_make_wwn = tcm_loop_make_scsi_hba, + .fabric_drop_wwn = tcm_loop_drop_scsi_hba, + .fabric_make_tpg = tcm_loop_make_naa_tpg, + .fabric_drop_tpg = tcm_loop_drop_naa_tpg, + .fabric_post_link = tcm_loop_port_link, + .fabric_pre_unlink = tcm_loop_port_unlink, + .tfc_wwn_attrs = tcm_loop_wwn_attrs, + .tfc_tpg_base_attrs = tcm_loop_tpg_attrs, + .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs, +}; + +static int __init tcm_loop_fabric_init(void) +{ + int ret = -ENOMEM; + + tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", + sizeof(struct tcm_loop_cmd), + __alignof__(struct tcm_loop_cmd), + 0, NULL); + if (!tcm_loop_cmd_cache) { + pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n"); + goto out; + } + + ret = tcm_loop_alloc_core_bus(); + if (ret) + goto out_destroy_cache; + + ret = target_register_template(&loop_ops); + if (ret) + goto 
out_release_core_bus; + + return 0; + +out_release_core_bus: + tcm_loop_release_core_bus(); +out_destroy_cache: + kmem_cache_destroy(tcm_loop_cmd_cache); +out: + return ret; +} + +static void __exit tcm_loop_fabric_exit(void) +{ + target_unregister_template(&loop_ops); + tcm_loop_release_core_bus(); + kmem_cache_destroy(tcm_loop_cmd_cache); +} + +MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); +MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>"); +MODULE_LICENSE("GPL"); +module_init(tcm_loop_fabric_init); +module_exit(tcm_loop_fabric_exit); diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h new file mode 100644 index 0000000000..437663b390 --- /dev/null +++ b/drivers/target/loopback/tcm_loop.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/types.h> +#include <linux/device.h> +#include <target/target_core_base.h> /* struct se_cmd */ + +#define TCM_LOOP_VERSION "v2.1-rc2" +#define TL_WWN_ADDR_LEN 256 +#define TL_TPGS_PER_HBA 32 + +struct tcm_loop_cmd { + /* State of Linux/SCSI CDB+Data descriptor */ + u32 sc_cmd_state; + /* Tagged command queueing */ + u32 sc_cmd_tag; + /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ + struct scsi_cmnd *sc; + /* The TCM I/O descriptor that is accessed via container_of() */ + struct se_cmd tl_se_cmd; + struct completion tmr_done; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; +}; + +struct tcm_loop_nexus { + /* + * Pointer to TCM session for I_T Nexus + */ + struct se_session *se_sess; +}; + +#define TCM_TRANSPORT_ONLINE 0 +#define TCM_TRANSPORT_OFFLINE 1 + +struct tcm_loop_tpg { + unsigned short tl_tpgt; + unsigned short tl_transport_status; + enum target_prot_type tl_fabric_prot_type; + atomic_t tl_tpg_port_count; + struct se_portal_group tl_se_tpg; + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; +}; + +struct tcm_loop_hba { + u8 tl_proto_id; + unsigned char tl_wwn_address[TL_WWN_ADDR_LEN]; + struct se_hba_s *se_hba; + struct se_lun *tl_hba_lun; + struct se_port *tl_hba_lun_sep; + struct device dev; + struct Scsi_Host *sh; + struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; + struct se_wwn tl_hba_wwn; +}; diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig new file mode 100644 index 0000000000..53a1c75f56 --- /dev/null +++ b/drivers/target/sbp/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SBP_TARGET + tristate "FireWire SBP-2 fabric module" + depends on FIREWIRE + help + Say Y or M here to enable SCSI target functionality over FireWire. + This enables you to expose SCSI devices to other nodes on the FireWire + bus, for example hard disks. Similar to FireWire Target Disk mode on + many Apple computers. + + To compile this driver as a module, say M here: The module will be + called sbp-target. 
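tcm_loop_fabric_init() above is a compact instance of the kernel's goto-unwind idiom: resources are acquired in a fixed order, each failure branch jumps to a label that releases only what has already been acquired, and tcm_loop_fabric_exit() tears the same resources down in reverse order. The standalone C sketch below reproduces the shape of that pattern; grab(), drop() and the resource names are invented for illustration and merely stand in for kmem_cache_create(), tcm_loop_alloc_core_bus() and target_register_template().

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the three setup stages in tcm_loop_fabric_init(). */
static void *grab(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(1);
}

static void drop(const char *what, void *res)
{
	printf("release %s\n", what);
	free(res);
}

static void *cache, *bus, *tmpl;

static int demo_init(void)
{
	int ret = -1;	/* analogous to the -ENOMEM default in the real code */

	cache = grab("cmd cache");
	if (!cache)
		goto out;

	bus = grab("core bus");
	if (!bus)
		goto out_destroy_cache;

	tmpl = grab("fabric template");
	if (!tmpl)
		goto out_release_bus;

	return 0;	/* success: everything stays live until exit */

out_release_bus:
	drop("core bus", bus);
out_destroy_cache:
	drop("cmd cache", cache);
out:
	return ret;
}

static void demo_exit(void)
{
	drop("fabric template", tmpl);
	drop("core bus", bus);
	drop("cmd cache", cache);
}

int main(void)
{
	if (demo_init())
		return 1;
	demo_exit();
	return 0;
}

The invariant that makes the idiom safe is that each label undoes exactly one acquisition and falls through to the one below it, so whatever prefix of the setup succeeded is exactly what gets released on failure.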
diff --git a/drivers/target/sbp/Makefile b/drivers/target/sbp/Makefile new file mode 100644 index 0000000000..766f236900 --- /dev/null +++ b/drivers/target/sbp/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SBP_TARGET) += sbp_target.o diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c new file mode 100644 index 0000000000..2a761bc091 --- /dev/null +++ b/drivers/target/sbp/sbp_target.c @@ -0,0 +1,2296 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SBP2 target driver (SCSI over IEEE1394 in target mode) + * + * Copyright (C) 2011 Chris Boot <bootc@bootc.net> + */ + +#define KMSG_COMPONENT "sbp_target" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> +#include <asm/unaligned.h> + +#include "sbp_target.h" + +/* FireWire address region for management and command block address handlers */ +static const struct fw_address_region sbp_register_region = { + .start = CSR_REGISTER_BASE + 0x10000, + .end = 0x1000000000000ULL, +}; + +static const u32 sbp_unit_directory_template[] = { + 0x1200609e, /* unit_specifier_id: NCITS/T10 */ + 0x13010483, /* unit_sw_version: 1155D Rev 4 */ + 0x3800609e, /* command_set_specifier_id: NCITS/T10 */ + 0x390104d8, /* command_set: SPC-2 */ + 0x3b000000, /* command_set_revision: 0 */ + 0x3c000001, /* firmware_revision: 1 */ +}; + +#define SESSION_MAINTENANCE_INTERVAL HZ + +static atomic_t login_id = ATOMIC_INIT(0); + +static void session_maintenance_work(struct work_struct *); +static int sbp_run_transaction(struct fw_card *, int, int, int, int, + unsigned long long, void *, size_t); + +static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) +{ + int ret; + __be32 high, low; + + ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, + req->node_addr, req->generation, req->speed, + (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4, + &high, sizeof(high)); + if (ret != RCODE_COMPLETE) + return ret; + + ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, + req->node_addr, req->generation, req->speed, + (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4, + &low, sizeof(low)); + if (ret != RCODE_COMPLETE) + return ret; + + *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low); + + return RCODE_COMPLETE; +} + +static struct sbp_session *sbp_session_find_by_guid( + struct sbp_tpg *tpg, u64 guid) +{ + struct se_session *se_sess; + struct sbp_session *sess, *found = NULL; + + spin_lock_bh(&tpg->se_tpg.session_lock); + list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { + sess = se_sess->fabric_sess_ptr; + if (sess->guid == guid) + found = sess; + } + spin_unlock_bh(&tpg->se_tpg.session_lock); + + return found; +} + +static struct sbp_login_descriptor *sbp_login_find_by_lun( + struct sbp_session *session, u32 unpacked_lun) +{ + struct sbp_login_descriptor *login, *found = NULL; + + spin_lock_bh(&session->lock); + list_for_each_entry(login, &session->login_list, link) { + if (login->login_lun == unpacked_lun) + found = login; + } + spin_unlock_bh(&session->lock); + + return found; +} + +static int 
sbp_login_count_all_by_lun( + struct sbp_tpg *tpg, + u32 unpacked_lun, + int exclusive) +{ + struct se_session *se_sess; + struct sbp_session *sess; + struct sbp_login_descriptor *login; + int count = 0; + + spin_lock_bh(&tpg->se_tpg.session_lock); + list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { + sess = se_sess->fabric_sess_ptr; + + spin_lock_bh(&sess->lock); + list_for_each_entry(login, &sess->login_list, link) { + if (login->login_lun != unpacked_lun) + continue; + + if (!exclusive || login->exclusive) + count++; + } + spin_unlock_bh(&sess->lock); + } + spin_unlock_bh(&tpg->se_tpg.session_lock); + + return count; +} + +static struct sbp_login_descriptor *sbp_login_find_by_id( + struct sbp_tpg *tpg, int login_id) +{ + struct se_session *se_sess; + struct sbp_session *sess; + struct sbp_login_descriptor *login, *found = NULL; + + spin_lock_bh(&tpg->se_tpg.session_lock); + list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { + sess = se_sess->fabric_sess_ptr; + + spin_lock_bh(&sess->lock); + list_for_each_entry(login, &sess->login_list, link) { + if (login->login_id == login_id) + found = login; + } + spin_unlock_bh(&sess->lock); + } + spin_unlock_bh(&tpg->se_tpg.session_lock); + + return found; +} + +static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err) +{ + struct se_portal_group *se_tpg = &tpg->se_tpg; + struct se_lun *se_lun; + + rcu_read_lock(); + hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) { + if (se_lun->unpacked_lun == login_lun) { + rcu_read_unlock(); + *err = 0; + return login_lun; + } + } + rcu_read_unlock(); + + *err = -ENODEV; + return login_lun; +} + +static struct sbp_session *sbp_session_create( + struct sbp_tpg *tpg, + u64 guid) +{ + struct sbp_session *sess; + int ret; + char guid_str[17]; + + snprintf(guid_str, sizeof(guid_str), "%016llx", guid); + + sess = kmalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&sess->lock); + INIT_LIST_HEAD(&sess->login_list); + INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); + sess->guid = guid; + + sess->se_sess = target_setup_session(&tpg->se_tpg, 128, + sizeof(struct sbp_target_request), + TARGET_PROT_NORMAL, guid_str, + sess, NULL); + if (IS_ERR(sess->se_sess)) { + pr_err("failed to init se_session\n"); + ret = PTR_ERR(sess->se_sess); + kfree(sess); + return ERR_PTR(ret); + } + + return sess; +} + +static void sbp_session_release(struct sbp_session *sess, bool cancel_work) +{ + spin_lock_bh(&sess->lock); + if (!list_empty(&sess->login_list)) { + spin_unlock_bh(&sess->lock); + return; + } + spin_unlock_bh(&sess->lock); + + if (cancel_work) + cancel_delayed_work_sync(&sess->maint_work); + + target_remove_session(sess->se_sess); + + if (sess->card) + fw_card_put(sess->card); + + kfree(sess); +} + +static void sbp_target_agent_unregister(struct sbp_target_agent *); + +static void sbp_login_release(struct sbp_login_descriptor *login, + bool cancel_work) +{ + struct sbp_session *sess = login->sess; + + /* FIXME: abort/wait on tasks */ + + sbp_target_agent_unregister(login->tgt_agt); + + if (sess) { + spin_lock_bh(&sess->lock); + list_del(&login->link); + spin_unlock_bh(&sess->lock); + + sbp_session_release(sess, cancel_work); + } + + kfree(login); +} + +static struct sbp_target_agent *sbp_target_agent_register( + struct sbp_login_descriptor *); + +static void sbp_management_request_login( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + struct 
sbp_tport *tport = agent->tport; + struct sbp_tpg *tpg = tport->tpg; + struct sbp_session *sess; + struct sbp_login_descriptor *login; + struct sbp_login_response_block *response; + u64 guid; + u32 unpacked_lun; + int login_response_len, ret; + + unpacked_lun = sbp_get_lun_from_tpg(tpg, + LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret); + if (ret) { + pr_notice("login to unknown LUN: %d\n", + LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP)); + return; + } + + ret = read_peer_guid(&guid, req); + if (ret != RCODE_COMPLETE) { + pr_warn("failed to read peer GUID: %d\n", ret); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + return; + } + + pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", + unpacked_lun, guid); + + sess = sbp_session_find_by_guid(tpg, guid); + if (sess) { + login = sbp_login_find_by_lun(sess, unpacked_lun); + if (login) { + pr_notice("initiator already logged-in\n"); + + /* + * SBP-2 R4 says we should return access denied, but + * that can confuse initiators. Instead we need to + * treat this like a reconnect, but send the login + * response block like a fresh login. + * + * This is required particularly in the case of Apple + * devices booting off the FireWire target, where + * the firmware has an active login to the target. When + * the OS takes control of the session it issues its own + * LOGIN rather than a RECONNECT. To avoid the machine + * waiting until the reconnect_hold expires, we can skip + * the ACCESS_DENIED errors to speed things up. + */ + + goto already_logged_in; + } + } + + /* + * check exclusive bit in login request + * reject with access_denied if any logins present + */ + if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && + sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) { + pr_warn("refusing exclusive login with other active logins\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + /* + * check exclusive bit in any existing login descriptor + * reject with access_denied if any exclusive logins present + */ + if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) { + pr_warn("refusing login while another exclusive login present\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + /* + * check we haven't exceeded the number of allowed logins + * reject with resources_unavailable if we have + */ + if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >= + tport->max_logins_per_lun) { + pr_warn("max number of logins reached\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + if (!sess) { + sess = sbp_session_create(tpg, guid); + if (IS_ERR(sess)) { + switch (PTR_ERR(sess)) { + case -EPERM: + ret = SBP_STATUS_ACCESS_DENIED; + break; + default: + ret = SBP_STATUS_RESOURCES_UNAVAIL; + break; + } + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(ret)); + return; + } + + sess->node_id = req->node_addr; + sess->card = fw_card_get(req->card); + sess->generation = req->generation; + sess->speed = req->speed; + + 
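	/*
+	 * New session: start the maintenance timer. It re-arms itself from
+	 * session_maintenance_work() and watches for bus resets and for
+	 * the reconnect_hold window expiring.
+	 */
+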
schedule_delayed_work(&sess->maint_work, + SESSION_MAINTENANCE_INTERVAL); + } + + /* only take the latest reconnect_hold into account */ + sess->reconnect_hold = min( + 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)), + tport->max_reconnect_timeout) - 1; + + login = kmalloc(sizeof(*login), GFP_KERNEL); + if (!login) { + pr_err("failed to allocate login descriptor\n"); + + sbp_session_release(sess, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + login->sess = sess; + login->login_lun = unpacked_lun; + login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); + login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); + login->login_id = atomic_inc_return(&login_id); + + login->tgt_agt = sbp_target_agent_register(login); + if (IS_ERR(login->tgt_agt)) { + ret = PTR_ERR(login->tgt_agt); + pr_err("failed to map command block handler: %d\n", ret); + + sbp_session_release(sess, true); + kfree(login); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + spin_lock_bh(&sess->lock); + list_add_tail(&login->link, &sess->login_list); + spin_unlock_bh(&sess->lock); + +already_logged_in: + response = kzalloc(sizeof(*response), GFP_KERNEL); + if (!response) { + pr_err("failed to allocate login response block\n"); + + sbp_login_release(login, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + login_response_len = clamp_val( + LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)), + 12, sizeof(*response)); + response->misc = cpu_to_be32( + ((login_response_len & 0xffff) << 16) | + (login->login_id & 0xffff)); + response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff); + addr_to_sbp2_pointer(login->tgt_agt->handler.offset, + &response->command_block_agent); + + ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST, + sess->node_id, sess->generation, sess->speed, + sbp2_pointer_to_addr(&req->orb.ptr2), response, + login_response_len); + if (ret != RCODE_COMPLETE) { + pr_debug("failed to write login response block: %x\n", ret); + + kfree(response); + sbp_login_release(login, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + return; + } + + kfree(response); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static void sbp_management_request_query_logins( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + pr_notice("QUERY LOGINS not implemented\n"); + /* FIXME: implement */ + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); +} + +static void sbp_management_request_reconnect( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + struct sbp_tport *tport = agent->tport; + struct sbp_tpg *tpg = tport->tpg; + int ret; + u64 guid; + struct sbp_login_descriptor *login; + + ret = read_peer_guid(&guid, req); + if (ret != RCODE_COMPLETE) { + pr_warn("failed to read peer GUID: %d\n", ret); + + req->status.status = cpu_to_be32( + 
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + return; + } + + pr_notice("mgt_agent RECONNECT from %016llx\n", guid); + + login = sbp_login_find_by_id(tpg, + RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc))); + + if (!login) { + pr_err("mgt_agent RECONNECT unknown login ID\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + if (login->sess->guid != guid) { + pr_err("mgt_agent RECONNECT login GUID doesn't match\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + spin_lock_bh(&login->sess->lock); + if (login->sess->card) + fw_card_put(login->sess->card); + + /* update the node details */ + login->sess->generation = req->generation; + login->sess->node_id = req->node_addr; + login->sess->card = fw_card_get(req->card); + login->sess->speed = req->speed; + spin_unlock_bh(&login->sess->lock); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static void sbp_management_request_logout( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + struct sbp_tport *tport = agent->tport; + struct sbp_tpg *tpg = tport->tpg; + int id; + struct sbp_login_descriptor *login; + + id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); + + login = sbp_login_find_by_id(tpg, id); + if (!login) { + pr_warn("cannot find login: %d\n", id); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN)); + return; + } + + pr_info("mgt_agent LOGOUT from LUN %d session %d\n", + login->login_lun, login->login_id); + + if (req->node_addr != login->sess->node_id) { + pr_warn("logout from different node ID\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + sbp_login_release(login, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static void session_check_for_reset(struct sbp_session *sess) +{ + bool card_valid = false; + + spin_lock_bh(&sess->lock); + + if (sess->card) { + spin_lock_irq(&sess->card->lock); + card_valid = (sess->card->local_node != NULL); + spin_unlock_irq(&sess->card->lock); + + if (!card_valid) { + fw_card_put(sess->card); + sess->card = NULL; + } + } + + if (!card_valid || (sess->generation != sess->card->generation)) { + pr_info("Waiting for reconnect from node: %016llx\n", + sess->guid); + + sess->node_id = -1; + sess->reconnect_expires = get_jiffies_64() + + ((sess->reconnect_hold + 1) * HZ); + } + + spin_unlock_bh(&sess->lock); +} + +static void session_reconnect_expired(struct sbp_session *sess) +{ + struct sbp_login_descriptor *login, *temp; + LIST_HEAD(login_list); + + pr_info("Reconnect timer expired for node: %016llx\n", sess->guid); + + spin_lock_bh(&sess->lock); + list_for_each_entry_safe(login, temp, &sess->login_list, link) { + login->sess = NULL; + list_move_tail(&login->link, &login_list); + } + spin_unlock_bh(&sess->lock); + + list_for_each_entry_safe(login, temp, &login_list, link) { + list_del(&login->link); + sbp_login_release(login, false); + } + + sbp_session_release(sess, 
false); +} + +static void session_maintenance_work(struct work_struct *work) +{ + struct sbp_session *sess = container_of(work, struct sbp_session, + maint_work.work); + + /* could be called while tearing down the session */ + spin_lock_bh(&sess->lock); + if (list_empty(&sess->login_list)) { + spin_unlock_bh(&sess->lock); + return; + } + spin_unlock_bh(&sess->lock); + + if (sess->node_id != -1) { + /* check for bus reset and make node_id invalid */ + session_check_for_reset(sess); + + schedule_delayed_work(&sess->maint_work, + SESSION_MAINTENANCE_INTERVAL); + } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) { + /* still waiting for reconnect */ + schedule_delayed_work(&sess->maint_work, + SESSION_MAINTENANCE_INTERVAL); + } else { + /* reconnect timeout has expired */ + session_reconnect_expired(sess); + } +} + +static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + int state; + + switch (tcode) { + case TCODE_READ_QUADLET_REQUEST: + pr_debug("tgt_agent AGENT_STATE READ\n"); + + spin_lock_bh(&agent->lock); + state = agent->state; + spin_unlock_bh(&agent->lock); + + *(__be32 *)data = cpu_to_be32(state); + + return RCODE_COMPLETE; + + case TCODE_WRITE_QUADLET_REQUEST: + /* ignored */ + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + pr_debug("tgt_agent AGENT_RESET\n"); + spin_lock_bh(&agent->lock); + agent->state = AGENT_STATE_RESET; + spin_unlock_bh(&agent->lock); + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + struct sbp2_pointer *ptr = data; + + switch (tcode) { + case TCODE_WRITE_BLOCK_REQUEST: + spin_lock_bh(&agent->lock); + if (agent->state != AGENT_STATE_SUSPENDED && + agent->state != AGENT_STATE_RESET) { + spin_unlock_bh(&agent->lock); + pr_notice("Ignoring ORB_POINTER write while active.\n"); + return RCODE_CONFLICT_ERROR; + } + agent->state = AGENT_STATE_ACTIVE; + spin_unlock_bh(&agent->lock); + + agent->orb_pointer = sbp2_pointer_to_addr(ptr); + agent->doorbell = false; + + pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", + agent->orb_pointer); + + queue_work(system_unbound_wq, &agent->work); + + return RCODE_COMPLETE; + + case TCODE_READ_BLOCK_REQUEST: + pr_debug("tgt_agent ORB_POINTER READ\n"); + spin_lock_bh(&agent->lock); + addr_to_sbp2_pointer(agent->orb_pointer, ptr); + spin_unlock_bh(&agent->lock); + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + spin_lock_bh(&agent->lock); + if (agent->state != AGENT_STATE_SUSPENDED) { + spin_unlock_bh(&agent->lock); + pr_debug("Ignoring DOORBELL while active.\n"); + return RCODE_CONFLICT_ERROR; + } + agent->state = AGENT_STATE_ACTIVE; + spin_unlock_bh(&agent->lock); + + agent->doorbell = true; + + pr_debug("tgt_agent DOORBELL\n"); + + queue_work(system_unbound_wq, &agent->work); + + return RCODE_COMPLETE; + + case TCODE_READ_QUADLET_REQUEST: + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card, + int tcode, void *data, struct 
sbp_target_agent *agent) +{ + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n"); + /* ignored as we don't send unsolicited status */ + return RCODE_COMPLETE; + + case TCODE_READ_QUADLET_REQUEST: + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static void tgt_agent_rw(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *data, size_t length, + void *callback_data) +{ + struct sbp_target_agent *agent = callback_data; + struct sbp_session *sess = agent->login->sess; + int sess_gen, sess_node, rcode; + + spin_lock_bh(&sess->lock); + sess_gen = sess->generation; + sess_node = sess->node_id; + spin_unlock_bh(&sess->lock); + + if (generation != sess_gen) { + pr_notice("ignoring request with wrong generation\n"); + rcode = RCODE_TYPE_ERROR; + goto out; + } + + if (source != sess_node) { + pr_notice("ignoring request from foreign node (%x != %x)\n", + source, sess_node); + rcode = RCODE_TYPE_ERROR; + goto out; + } + + /* turn offset into the offset from the start of the block */ + offset -= agent->handler.offset; + + if (offset == 0x00 && length == 4) { + /* AGENT_STATE */ + rcode = tgt_agent_rw_agent_state(card, tcode, data, agent); + } else if (offset == 0x04 && length == 4) { + /* AGENT_RESET */ + rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent); + } else if (offset == 0x08 && length == 8) { + /* ORB_POINTER */ + rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent); + } else if (offset == 0x10 && length == 4) { + /* DOORBELL */ + rcode = tgt_agent_rw_doorbell(card, tcode, data, agent); + } else if (offset == 0x14 && length == 4) { + /* UNSOLICITED_STATUS_ENABLE */ + rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode, + data, agent); + } else { + rcode = RCODE_ADDRESS_ERROR; + } + +out: + fw_send_response(card, request, rcode); +} + +static void sbp_handle_command(struct sbp_target_request *); +static int sbp_send_status(struct sbp_target_request *); +static void sbp_free_request(struct sbp_target_request *); + +static void tgt_agent_process_work(struct work_struct *work) +{ + struct sbp_target_request *req = + container_of(work, struct sbp_target_request, work); + + pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n", + req->orb_pointer, + sbp2_pointer_to_addr(&req->orb.next_orb), + sbp2_pointer_to_addr(&req->orb.data_descriptor), + be32_to_cpu(req->orb.misc)); + + if (req->orb_pointer >> 32) + pr_debug("ORB with high bits set\n"); + + switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) { + case 0:/* Format specified by this standard */ + sbp_handle_command(req); + return; + case 1: /* Reserved for future standardization */ + case 2: /* Vendor-dependent */ + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_REQ_TYPE_NOTSUPP)); + sbp_send_status(req); + return; + case 3: /* Dummy ORB */ + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_DUMMY_ORB_COMPLETE)); + sbp_send_status(req); + return; + default: + BUG(); + } +} + +/* used to double-check we haven't been issued an AGENT_RESET */ +static inline bool tgt_agent_check_active(struct sbp_target_agent *agent) +{ + bool active; + + spin_lock_bh(&agent->lock); + active = 
(agent->state == AGENT_STATE_ACTIVE); + spin_unlock_bh(&agent->lock); + + return active; +} + +static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess, + struct fw_card *card, u64 next_orb) +{ + struct se_session *se_sess = sess->se_sess; + struct sbp_target_request *req; + int tag, cpu; + + tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); + if (tag < 0) + return ERR_PTR(-ENOMEM); + + req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag]; + memset(req, 0, sizeof(*req)); + req->se_cmd.map_tag = tag; + req->se_cmd.map_cpu = cpu; + req->se_cmd.tag = next_orb; + + return req; +} + +static void tgt_agent_fetch_work(struct work_struct *work) +{ + struct sbp_target_agent *agent = + container_of(work, struct sbp_target_agent, work); + struct sbp_session *sess = agent->login->sess; + struct sbp_target_request *req; + int ret; + bool doorbell = agent->doorbell; + u64 next_orb = agent->orb_pointer; + + while (next_orb && tgt_agent_check_active(agent)) { + req = sbp_mgt_get_req(sess, sess->card, next_orb); + if (IS_ERR(req)) { + spin_lock_bh(&agent->lock); + agent->state = AGENT_STATE_DEAD; + spin_unlock_bh(&agent->lock); + return; + } + + req->login = agent->login; + req->orb_pointer = next_orb; + + req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH( + req->orb_pointer >> 32)); + req->status.orb_low = cpu_to_be32( + req->orb_pointer & 0xfffffffc); + + /* read in the ORB */ + ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST, + sess->node_id, sess->generation, sess->speed, + req->orb_pointer, &req->orb, sizeof(req->orb)); + if (ret != RCODE_COMPLETE) { + pr_debug("tgt_orb fetch failed: %x\n", ret); + req->status.status |= cpu_to_be32( + STATUS_BLOCK_SRC( + STATUS_SRC_ORB_FINISHED) | + STATUS_BLOCK_RESP( + STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(1) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_UNSPECIFIED_ERROR)); + spin_lock_bh(&agent->lock); + agent->state = AGENT_STATE_DEAD; + spin_unlock_bh(&agent->lock); + + sbp_send_status(req); + return; + } + + /* check the next_ORB field */ + if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) { + next_orb = 0; + req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( + STATUS_SRC_ORB_FINISHED)); + } else { + next_orb = sbp2_pointer_to_addr(&req->orb.next_orb); + req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( + STATUS_SRC_ORB_CONTINUING)); + } + + if (tgt_agent_check_active(agent) && !doorbell) { + INIT_WORK(&req->work, tgt_agent_process_work); + queue_work(system_unbound_wq, &req->work); + } else { + /* don't process this request, just check next_ORB */ + sbp_free_request(req); + } + + spin_lock_bh(&agent->lock); + doorbell = agent->doorbell = false; + + /* check if we should carry on processing */ + if (next_orb) + agent->orb_pointer = next_orb; + else + agent->state = AGENT_STATE_SUSPENDED; + + spin_unlock_bh(&agent->lock); + } +} + +static struct sbp_target_agent *sbp_target_agent_register( + struct sbp_login_descriptor *login) +{ + struct sbp_target_agent *agent; + int ret; + + agent = kmalloc(sizeof(*agent), GFP_KERNEL); + if (!agent) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&agent->lock); + + agent->handler.length = 0x20; + agent->handler.address_callback = tgt_agent_rw; + agent->handler.callback_data = agent; + + agent->login = login; + agent->state = AGENT_STATE_RESET; + INIT_WORK(&agent->work, tgt_agent_fetch_work); + agent->orb_pointer = 0; + agent->doorbell = false; + + ret = fw_core_add_address_handler(&agent->handler, + &sbp_register_region); + 
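	/*
+	 * On success, agent->handler.offset is the bus address of this
+	 * login's command block agent; sbp_management_request_login()
+	 * returns it to the initiator in the login response block.
+	 */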
+	if (ret < 0) {
+		kfree(agent);
+		return ERR_PTR(ret);
+	}
+
+	return agent;
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
+{
+	fw_core_remove_address_handler(&agent->handler);
+	cancel_work_sync(&agent->work);
+	kfree(agent);
+}
+
+/*
+ * Simple wrapper around fw_run_transaction that retries the transaction
+ * several times in case of failure, with a backoff that grows
+ * quadratically with the attempt number (5 * attempt^2 microseconds).
+ */
+static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
+		int generation, int speed, unsigned long long offset,
+		void *payload, size_t length)
+{
+	int attempt, ret, delay;
+
+	for (attempt = 1; attempt <= 5; attempt++) {
+		ret = fw_run_transaction(card, tcode, destination_id,
+				generation, speed, offset, payload, length);
+
+		switch (ret) {
+		case RCODE_COMPLETE:
+		case RCODE_TYPE_ERROR:
+		case RCODE_ADDRESS_ERROR:
+		case RCODE_GENERATION:
+			return ret;
+
+		default:
+			delay = 5 * attempt * attempt;
+			usleep_range(delay, delay * 2);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Wrapper around sbp_run_transaction that gets the card, destination,
+ * generation and speed out of the request's session.
+ */
+static int sbp_run_request_transaction(struct sbp_target_request *req,
+		int tcode, unsigned long long offset, void *payload,
+		size_t length)
+{
+	struct sbp_login_descriptor *login = req->login;
+	struct sbp_session *sess = login->sess;
+	struct fw_card *card;
+	int node_id, generation, speed, ret;
+
+	spin_lock_bh(&sess->lock);
+	card = fw_card_get(sess->card);
+	node_id = sess->node_id;
+	generation = sess->generation;
+	speed = sess->speed;
+	spin_unlock_bh(&sess->lock);
+
+	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
+			offset, payload, length);
+
+	fw_card_put(card);
+
+	return ret;
+}
+
+static int sbp_fetch_command(struct sbp_target_request *req)
+{
+	int ret, cmd_len, copy_len;
+
+	cmd_len = scsi_command_size(req->orb.command_block);
+
+	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
+	if (!req->cmd_buf)
+		return -ENOMEM;
+
+	memcpy(req->cmd_buf, req->orb.command_block,
+		min_t(int, cmd_len, sizeof(req->orb.command_block)));
+
+	if (cmd_len > sizeof(req->orb.command_block)) {
+		pr_debug("sbp_fetch_command: filling in long command\n");
+		copy_len = cmd_len - sizeof(req->orb.command_block);
+
+		ret = sbp_run_request_transaction(req,
+				TCODE_READ_BLOCK_REQUEST,
+				req->orb_pointer + sizeof(req->orb),
+				req->cmd_buf + sizeof(req->orb.command_block),
+				copy_len);
+		if (ret != RCODE_COMPLETE)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int sbp_fetch_page_table(struct sbp_target_request *req)
+{
+	int pg_tbl_sz, ret;
+	struct sbp_page_table_entry *pg_tbl;
+
+	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
+		return 0;
+
+	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
+		sizeof(struct sbp_page_table_entry);
+
+	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
+	if (!pg_tbl)
+		return -ENOMEM;
+
+	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
+			sbp2_pointer_to_addr(&req->orb.data_descriptor),
+			pg_tbl, pg_tbl_sz);
+	if (ret != RCODE_COMPLETE) {
+		kfree(pg_tbl);
+		return -EIO;
+	}
+
+	req->pg_tbl = pg_tbl;
+	return 0;
+}
+
+static void sbp_calc_data_length_direction(struct sbp_target_request *req,
+	u32 *data_len, enum dma_data_direction *data_dir)
+{
+	int data_size, direction, idx;
+
+	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
+
+	if (!data_size) {
+		*data_len = 0;
+		*data_dir = DMA_NONE;
+		return;
+	}
+
+	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	if (req->pg_tbl) {
+		*data_len = 0;
+		for (idx = 0; idx < data_size; idx++) {
+			*data_len += be16_to_cpu(
+					req->pg_tbl[idx].segment_length);
+		}
+	} else {
+		*data_len = data_size;
+	}
+}
+
+static void sbp_handle_command(struct sbp_target_request *req)
+{
+	struct sbp_login_descriptor *login = req->login;
+	struct sbp_session *sess = login->sess;
+	int ret, unpacked_lun;
+	u32 data_length;
+	enum dma_data_direction data_dir;
+
+	ret = sbp_fetch_command(req);
+	if (ret) {
+		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = sbp_fetch_page_table(req);
+	if (ret) {
+		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
+			ret);
+		goto err;
+	}
+
+	unpacked_lun = req->login->login_lun;
+	sbp_calc_data_length_direction(req, &data_length, &data_dir);
+
+	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
+			req->orb_pointer, unpacked_lun, data_length, data_dir);
+
+	/* only used for printk until we do TMRs */
+	req->se_cmd.tag = req->orb_pointer;
+	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+			  req->sense_buf, unpacked_lun, data_length,
+			  TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
+	return;
+
+err:
+	req->status.status |= cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+		STATUS_BLOCK_DEAD(0) |
+		STATUS_BLOCK_LEN(1) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+	sbp_send_status(req);
+}
+
+/*
+ * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
+ * DMA_FROM_DEVICE = write to initiator (SCSI READ)
+ */
+static int sbp_rw_data(struct sbp_target_request *req)
+{
+	struct sbp_session *sess = req->login->sess;
+	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
+		generation, num_pte, length, tfr_length,
+		rcode = RCODE_COMPLETE;
+	struct sbp_page_table_entry *pte;
+	unsigned long long offset;
+	struct fw_card *card;
+	struct sg_mapping_iter iter;
+
+	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
+		tcode = TCODE_WRITE_BLOCK_REQUEST;
+		sg_miter_flags = SG_MITER_FROM_SG;
+	} else {
+		tcode = TCODE_READ_BLOCK_REQUEST;
+		sg_miter_flags = SG_MITER_TO_SG;
+	}
+
+	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
+	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
+
+	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
+	if (pg_size) {
+		pr_err("sbp_rw_data: page size ignored\n");
+	}
+
+	spin_lock_bh(&sess->lock);
+	card = fw_card_get(sess->card);
+	node_id = sess->node_id;
+	generation = sess->generation;
+	spin_unlock_bh(&sess->lock);
+
+	if (req->pg_tbl) {
+		pte = req->pg_tbl;
+		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+
+		offset = 0;
+		length = 0;
+	} else {
+		pte = NULL;
+		num_pte = 0;
+
+		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
+		length = req->se_cmd.data_length;
+	}
+
+	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
+		sg_miter_flags);
+
+	while (length || num_pte) {
+		if (!length) {
+			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
+				be32_to_cpu(pte->segment_base_lo);
+			length = be16_to_cpu(pte->segment_length);
+
+			pte++;
+			num_pte--;
+		}
+
+		sg_miter_next(&iter);
+
+		tfr_length = min3(length, max_payload, (int)iter.length);
+
+		/* FIXME: take page_size into account */
+
+		rcode = sbp_run_transaction(card, tcode, node_id,
+				generation, speed,
+				offset, iter.addr, tfr_length);
+
+		if (rcode != RCODE_COMPLETE)
+			break;
+
+		length -= tfr_length;
+		offset += tfr_length;
+		iter.consumed =
tfr_length; + } + + sg_miter_stop(&iter); + fw_card_put(card); + + if (rcode == RCODE_COMPLETE) { + WARN_ON(length != 0); + return 0; + } else { + return -EIO; + } +} + +static int sbp_send_status(struct sbp_target_request *req) +{ + int rc, ret = 0, length; + struct sbp_login_descriptor *login = req->login; + + length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; + + rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, + login->status_fifo_addr, &req->status, length); + if (rc != RCODE_COMPLETE) { + pr_debug("sbp_send_status: write failed: 0x%x\n", rc); + ret = -EIO; + goto put_ref; + } + + pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n", + req->orb_pointer); + /* + * Drop the extra ACK_KREF reference taken by target_submit_cmd() + * ahead of sbp_check_stop_free() -> transport_generic_free_cmd() + * final se_cmd->cmd_kref put. + */ +put_ref: + target_put_sess_cmd(&req->se_cmd); + return ret; +} + +static void sbp_sense_mangle(struct sbp_target_request *req) +{ + struct se_cmd *se_cmd = &req->se_cmd; + u8 *sense = req->sense_buf; + u8 *status = req->status.data; + + WARN_ON(se_cmd->scsi_sense_length < 18); + + switch (sense[0] & 0x7f) { /* sfmt */ + case 0x70: /* current, fixed */ + status[0] = 0 << 6; + break; + case 0x71: /* deferred, fixed */ + status[0] = 1 << 6; + break; + case 0x72: /* current, descriptor */ + case 0x73: /* deferred, descriptor */ + default: + /* + * TODO: SBP-3 specifies what we should do with descriptor + * format sense data + */ + pr_err("sbp_send_sense: unknown sense format: 0x%x\n", + sense[0]); + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED)); + return; + } + + status[0] |= se_cmd->scsi_status & 0x3f;/* status */ + status[1] = + (sense[0] & 0x80) | /* valid */ + ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */ + (sense[2] & 0x0f); /* sense_key */ + status[2] = 0; /* XXX sense_code */ + status[3] = 0; /* XXX sense_qualifier */ + + /* information */ + status[4] = sense[3]; + status[5] = sense[4]; + status[6] = sense[5]; + status[7] = sense[6]; + + /* CDB-dependent */ + status[8] = sense[8]; + status[9] = sense[9]; + status[10] = sense[10]; + status[11] = sense[11]; + + /* fru */ + status[12] = sense[14]; + + /* sense_key-dependent */ + status[13] = sense[15]; + status[14] = sense[16]; + status[15] = sense[17]; + + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(5) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static int sbp_send_sense(struct sbp_target_request *req) +{ + struct se_cmd *se_cmd = &req->se_cmd; + + if (se_cmd->scsi_sense_length) { + sbp_sense_mangle(req); + } else { + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); + } + + return sbp_send_status(req); +} + +static void sbp_free_request(struct sbp_target_request *req) +{ + struct se_cmd *se_cmd = &req->se_cmd; + struct se_session *se_sess = se_cmd->se_sess; + + kfree(req->pg_tbl); + kfree(req->cmd_buf); + + target_free_tag(se_sess, se_cmd); +} + +static void sbp_mgt_agent_process(struct work_struct *work) +{ + struct sbp_management_agent *agent = + container_of(work, struct sbp_management_agent, work); + struct sbp_management_request *req = agent->request; + int ret; + int status_data_len = 0; + + 
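	/*
+	 * Management agent work item: fetch the management ORB from the
+	 * initiator, dispatch on the ORB function (LOGIN, QUERY LOGINS,
+	 * RECONNECT, LOGOUT, task management), then write a status block
+	 * back to the status FIFO address named in the ORB.
+	 */
+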
/* fetch the ORB from the initiator */ + ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST, + req->node_addr, req->generation, req->speed, + agent->orb_offset, &req->orb, sizeof(req->orb)); + if (ret != RCODE_COMPLETE) { + pr_debug("mgt_orb fetch failed: %x\n", ret); + goto out; + } + + pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n", + sbp2_pointer_to_addr(&req->orb.ptr1), + sbp2_pointer_to_addr(&req->orb.ptr2), + be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length), + sbp2_pointer_to_addr(&req->orb.status_fifo)); + + if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) || + ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) { + pr_err("mgt_orb bad request\n"); + goto out; + } + + switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) { + case MANAGEMENT_ORB_FUNCTION_LOGIN: + sbp_management_request_login(agent, req, &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS: + sbp_management_request_query_logins(agent, req, + &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_RECONNECT: + sbp_management_request_reconnect(agent, req, &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD: + pr_notice("SET PASSWORD not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_LOGOUT: + sbp_management_request_logout(agent, req, &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_ABORT_TASK: + pr_notice("ABORT TASK not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET: + pr_notice("ABORT TASK SET not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET: + pr_notice("LOGICAL UNIT RESET not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_TARGET_RESET: + pr_notice("TARGET RESET not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + default: + pr_notice("unknown management function 0x%x\n", + MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + } + + req->status.status |= cpu_to_be32( + STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */ + STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) | + STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32)); + req->status.orb_low = cpu_to_be32(agent->orb_offset); + + /* write the status block back to the initiator */ + ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST, + req->node_addr, req->generation, req->speed, + sbp2_pointer_to_addr(&req->orb.status_fifo), + &req->status, 8 + status_data_len); + if (ret != RCODE_COMPLETE) { + pr_debug("mgt_orb status write failed: %x\n", ret); + goto out; + } + +out: + fw_card_put(req->card); + kfree(req); + + spin_lock_bh(&agent->lock); + agent->state = 
MANAGEMENT_AGENT_STATE_IDLE; + spin_unlock_bh(&agent->lock); +} + +static void sbp_mgt_agent_rw(struct fw_card *card, + struct fw_request *request, int tcode, int destination, int source, + int generation, unsigned long long offset, void *data, size_t length, + void *callback_data) +{ + struct sbp_management_agent *agent = callback_data; + struct sbp2_pointer *ptr = data; + int rcode = RCODE_ADDRESS_ERROR; + + if (!agent->tport->enable) + goto out; + + if ((offset != agent->handler.offset) || (length != 8)) + goto out; + + if (tcode == TCODE_WRITE_BLOCK_REQUEST) { + struct sbp_management_request *req; + int prev_state; + + spin_lock_bh(&agent->lock); + prev_state = agent->state; + agent->state = MANAGEMENT_AGENT_STATE_BUSY; + spin_unlock_bh(&agent->lock); + + if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) { + pr_notice("ignoring management request while busy\n"); + rcode = RCODE_CONFLICT_ERROR; + goto out; + } + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + rcode = RCODE_CONFLICT_ERROR; + goto out; + } + + req->card = fw_card_get(card); + req->generation = generation; + req->node_addr = source; + req->speed = fw_get_request_speed(request); + + agent->orb_offset = sbp2_pointer_to_addr(ptr); + agent->request = req; + + queue_work(system_unbound_wq, &agent->work); + rcode = RCODE_COMPLETE; + } else if (tcode == TCODE_READ_BLOCK_REQUEST) { + addr_to_sbp2_pointer(agent->orb_offset, ptr); + rcode = RCODE_COMPLETE; + } else { + rcode = RCODE_TYPE_ERROR; + } + +out: + fw_send_response(card, request, rcode); +} + +static struct sbp_management_agent *sbp_management_agent_register( + struct sbp_tport *tport) +{ + int ret; + struct sbp_management_agent *agent; + + agent = kmalloc(sizeof(*agent), GFP_KERNEL); + if (!agent) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&agent->lock); + agent->tport = tport; + agent->handler.length = 0x08; + agent->handler.address_callback = sbp_mgt_agent_rw; + agent->handler.callback_data = agent; + agent->state = MANAGEMENT_AGENT_STATE_IDLE; + INIT_WORK(&agent->work, sbp_mgt_agent_process); + agent->orb_offset = 0; + agent->request = NULL; + + ret = fw_core_add_address_handler(&agent->handler, + &sbp_register_region); + if (ret < 0) { + kfree(agent); + return ERR_PTR(ret); + } + + return agent; +} + +static void sbp_management_agent_unregister(struct sbp_management_agent *agent) +{ + fw_core_remove_address_handler(&agent->handler); + cancel_work_sync(&agent->work); + kfree(agent); +} + +static int sbp_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 sbp_get_tag(struct se_portal_group *se_tpg) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static void sbp_release_cmd(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + sbp_free_request(req); +} + +static int sbp_write_pending(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + int ret; + + ret = sbp_rw_data(req); + if (ret) { + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + return ret; + } + + 
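	/*
+	 * The data-out payload has now been pulled from the initiator
+	 * over the bus, so hand the command to the core for backend
+	 * execution.
+	 */
+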
target_execute_cmd(se_cmd); + return 0; +} + +static int sbp_queue_data_in(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + int ret; + + ret = sbp_rw_data(req); + if (ret) { + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + return ret; + } + + return sbp_send_sense(req); +} + +/* + * Called after command (no data transfer) or after the write (to device) + * operation is completed + */ +static int sbp_queue_status(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + return sbp_send_sense(req); +} + +static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) +{ +} + +static void sbp_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + +static int sbp_check_stop_free(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + return transport_generic_free_cmd(&req->se_cmd, 0); +} + +static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) +{ + struct se_lun *lun; + int count = 0; + + rcu_read_lock(); + hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) + count++; + rcu_read_unlock(); + + return count; +} + +static int sbp_update_unit_directory(struct sbp_tport *tport) +{ + struct se_lun *lun; + int num_luns, num_entries, idx = 0, mgt_agt_addr, ret; + u32 *data; + + if (tport->unit_directory.data) { + fw_core_remove_descriptor(&tport->unit_directory); + kfree(tport->unit_directory.data); + tport->unit_directory.data = NULL; + } + + if (!tport->enable || !tport->tpg) + return 0; + + num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg); + + /* + * Number of entries in the final unit directory: + * - all of those in the template + * - management_agent + * - unit_characteristics + * - reconnect_timeout + * - unit unique ID + * - one for each LUN + * + * MUST NOT include leaf or sub-directory entries + */ + num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns; + + if (tport->directory_id != -1) + num_entries++; + + /* allocate num_entries + 4 for the header and unique ID leaf */ + data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* directory_length */ + data[idx++] = num_entries << 16; + + /* directory_id */ + if (tport->directory_id != -1) + data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id; + + /* unit directory template */ + memcpy(&data[idx], sbp_unit_directory_template, + sizeof(sbp_unit_directory_template)); + idx += ARRAY_SIZE(sbp_unit_directory_template); + + /* management_agent */ + mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4; + data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff); + + /* unit_characteristics */ + data[idx++] = 0x3a000000 | + (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) | + SBP_ORB_FETCH_SIZE; + + /* reconnect_timeout */ + data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff); + + /* unit unique ID (leaf is just after LUNs) */ + data[idx++] = 0x8d000000 | (num_luns + 1); + + rcu_read_lock(); + hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) { + struct se_device *dev; + int type; + /* + * rcu_dereference_raw protected by se_lun->lun_group symlink + * reference to se_device->dev_group. 
+ */ + dev = rcu_dereference_raw(lun->lun_se_dev); + type = dev->transport->get_device_type(dev); + + /* logical_unit_number */ + data[idx++] = 0x14000000 | + ((type << 16) & 0x1f0000) | + (lun->unpacked_lun & 0xffff); + } + rcu_read_unlock(); + + /* unit unique ID leaf */ + data[idx++] = 2 << 16; + data[idx++] = tport->guid >> 32; + data[idx++] = tport->guid; + + tport->unit_directory.length = idx; + tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24; + tport->unit_directory.data = data; + + ret = fw_core_add_descriptor(&tport->unit_directory); + if (ret < 0) { + kfree(tport->unit_directory.data); + tport->unit_directory.data = NULL; + } + + return ret; +} + +static ssize_t sbp_parse_wwn(const char *name, u64 *wwn) +{ + const char *cp; + char c, nibble; + int pos = 0, err; + + *wwn = 0; + for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) { + c = *cp; + if (c == '\n' && cp[1] == '\0') + continue; + if (c == '\0') { + err = 2; + if (pos != 16) + goto fail; + return cp - name; + } + err = 3; + if (isdigit(c)) + nibble = c - '0'; + else if (isxdigit(c)) + nibble = tolower(c) - 'a' + 10; + else + goto fail; + *wwn = (*wwn << 4) | nibble; + pos++; + } + err = 4; +fail: + printk(KERN_INFO "err %u len %zu pos %u\n", + err, cp - name, pos); + return -1; +} + +static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn) +{ + return snprintf(buf, len, "%016llx", wwn); +} + +static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name) +{ + u64 guid = 0; + + if (sbp_parse_wwn(name, &guid) < 0) + return -EINVAL; + return 0; +} + +static int sbp_post_link_lun( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + + return sbp_update_unit_directory(tpg->tport); +} + +static void sbp_pre_unlink_lun( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + int ret; + + if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) + tport->enable = 0; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) + pr_err("unlink LUN: failed to update unit directory\n"); +} + +static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn, + const char *name) +{ + struct sbp_tport *tport = + container_of(wwn, struct sbp_tport, tport_wwn); + + struct sbp_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + return ERR_PTR(-EINVAL); + + if (tport->tpg) { + pr_err("Only one TPG per Unit is possible.\n"); + return ERR_PTR(-EBUSY); + } + + tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); + if (!tpg) + return ERR_PTR(-ENOMEM); + + tpg->tport = tport; + tpg->tport_tpgt = tpgt; + tport->tpg = tpg; + + /* default attribute values */ + tport->enable = 0; + tport->directory_id = -1; + tport->mgt_orb_timeout = 15; + tport->max_reconnect_timeout = 5; + tport->max_logins_per_lun = 1; + + tport->mgt_agt = sbp_management_agent_register(tport); + if (IS_ERR(tport->mgt_agt)) { + ret = PTR_ERR(tport->mgt_agt); + goto out_free_tpg; + } + + ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP); + if (ret < 0) + goto out_unreg_mgt_agt; + + return &tpg->se_tpg; + +out_unreg_mgt_agt: + sbp_management_agent_unregister(tport->mgt_agt); +out_free_tpg: + tport->tpg = NULL; + kfree(tpg); + return ERR_PTR(ret); +} + +static void sbp_drop_tpg(struct se_portal_group *se_tpg) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct 
sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + + core_tpg_deregister(se_tpg); + sbp_management_agent_unregister(tport->mgt_agt); + tport->tpg = NULL; + kfree(tpg); +} + +static struct se_wwn *sbp_make_tport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct sbp_tport *tport; + u64 guid = 0; + + if (sbp_parse_wwn(name, &guid) < 0) + return ERR_PTR(-EINVAL); + + tport = kzalloc(sizeof(*tport), GFP_KERNEL); + if (!tport) + return ERR_PTR(-ENOMEM); + + tport->guid = guid; + sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid); + + return &tport->tport_wwn; +} + +static void sbp_drop_tport(struct se_wwn *wwn) +{ + struct sbp_tport *tport = + container_of(wwn, struct sbp_tport, tport_wwn); + + kfree(tport); +} + +static ssize_t sbp_wwn_version_show(struct config_item *item, char *page) +{ + return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION); +} + +CONFIGFS_ATTR_RO(sbp_wwn_, version); + +static struct configfs_attribute *sbp_wwn_attrs[] = { + &sbp_wwn_attr_version, + NULL, +}; + +static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + + if (tport->directory_id == -1) + return sprintf(page, "implicit\n"); + else + return sprintf(page, "%06x\n", tport->directory_id); +} + +static ssize_t sbp_tpg_directory_id_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + + if (tport->enable) { + pr_err("Cannot change the directory_id on an active target.\n"); + return -EBUSY; + } + + if (strstr(page, "implicit") == page) { + tport->directory_id = -1; + } else { + if (kstrtoul(page, 16, &val) < 0) + return -EINVAL; + if (val > 0xffffff) + return -EINVAL; + + tport->directory_id = val; + } + + return count; +} + +static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + int ret; + + if (enable) { + if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { + pr_err("Cannot enable a target with no LUNs!\n"); + return -EINVAL; + } + } else { + /* XXX: force-shutdown sessions instead? 
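Until such
+		 * forced shutdown exists, refuse to disable a TPG while any
+		 * session is still logged in: the check below returns -EBUSY
+		 * and the initiator must log out first.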
*/ + spin_lock_bh(&se_tpg->session_lock); + if (!list_empty(&se_tpg->tpg_sess_list)) { + spin_unlock_bh(&se_tpg->session_lock); + return -EBUSY; + } + spin_unlock_bh(&se_tpg->session_lock); + } + + tport->enable = enable; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) { + pr_err("Could not update Config ROM\n"); + return ret; + } + + return 0; +} + +CONFIGFS_ATTR(sbp_tpg_, directory_id); + +static struct configfs_attribute *sbp_tpg_base_attrs[] = { + &sbp_tpg_attr_directory_id, + NULL, +}; + +static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->mgt_orb_timeout); +} + +static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + int ret; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val < 1) || (val > 127)) + return -EINVAL; + + if (tport->mgt_orb_timeout == val) + return count; + + tport->mgt_orb_timeout = val; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->max_reconnect_timeout); +} + +static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + int ret; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val < 1) || (val > 32767)) + return -EINVAL; + + if (tport->max_reconnect_timeout == val) + return count; + + tport->max_reconnect_timeout = val; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->max_logins_per_lun); +} + +static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val < 1) || (val > 127)) + return -EINVAL; + + /* XXX: also check against current count? 
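Note that lowering the
+	 * limit does not evict existing logins; it only bounds how many
+	 * future LOGIN requests sbp_management_request_login() will accept
+	 * for the LUN.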
*/ + + tport->max_logins_per_lun = val; + + return count; +} + +CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout); +CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout); +CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun); + +static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { + &sbp_tpg_attrib_attr_mgt_orb_timeout, + &sbp_tpg_attrib_attr_max_reconnect_timeout, + &sbp_tpg_attrib_attr_max_logins_per_lun, + NULL, +}; + +static const struct target_core_fabric_ops sbp_ops = { + .module = THIS_MODULE, + .fabric_name = "sbp", + .tpg_get_wwn = sbp_get_fabric_wwn, + .tpg_get_tag = sbp_get_tag, + .tpg_check_demo_mode = sbp_check_true, + .tpg_check_demo_mode_cache = sbp_check_true, + .release_cmd = sbp_release_cmd, + .write_pending = sbp_write_pending, + .queue_data_in = sbp_queue_data_in, + .queue_status = sbp_queue_status, + .queue_tm_rsp = sbp_queue_tm_rsp, + .aborted_task = sbp_aborted_task, + .check_stop_free = sbp_check_stop_free, + + .fabric_make_wwn = sbp_make_tport, + .fabric_drop_wwn = sbp_drop_tport, + .fabric_make_tpg = sbp_make_tpg, + .fabric_enable_tpg = sbp_enable_tpg, + .fabric_drop_tpg = sbp_drop_tpg, + .fabric_post_link = sbp_post_link_lun, + .fabric_pre_unlink = sbp_pre_unlink_lun, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_init_nodeacl = sbp_init_nodeacl, + + .tfc_wwn_attrs = sbp_wwn_attrs, + .tfc_tpg_base_attrs = sbp_tpg_base_attrs, + .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, +}; + +static int __init sbp_init(void) +{ + return target_register_template(&sbp_ops); +}; + +static void __exit sbp_exit(void) +{ + target_unregister_template(&sbp_ops); +}; + +MODULE_DESCRIPTION("FireWire SBP fabric driver"); +MODULE_LICENSE("GPL"); +module_init(sbp_init); +module_exit(sbp_exit); diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h new file mode 100644 index 0000000000..1d101ac865 --- /dev/null +++ b/drivers/target/sbp/sbp_target.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SBP_BASE_H +#define _SBP_BASE_H + +#include <linux/firewire.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include <target/target_core_base.h> + +#define SBP_VERSION "v0.1" +#define SBP_NAMELEN 32 + +#define SBP_ORB_FETCH_SIZE 8 + +#define MANAGEMENT_AGENT_STATE_IDLE 0 +#define MANAGEMENT_AGENT_STATE_BUSY 1 + +#define ORB_NOTIFY(v) (((v) >> 31) & 0x01) +#define ORB_REQUEST_FORMAT(v) (((v) >> 29) & 0x03) + +#define MANAGEMENT_ORB_FUNCTION(v) (((v) >> 16) & 0x0f) + +#define MANAGEMENT_ORB_FUNCTION_LOGIN 0x0 +#define MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS 0x1 +#define MANAGEMENT_ORB_FUNCTION_RECONNECT 0x3 +#define MANAGEMENT_ORB_FUNCTION_SET_PASSWORD 0x4 +#define MANAGEMENT_ORB_FUNCTION_LOGOUT 0x7 +#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK 0xb +#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET 0xc +#define MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET 0xe +#define MANAGEMENT_ORB_FUNCTION_TARGET_RESET 0xf + +#define LOGIN_ORB_EXCLUSIVE(v) (((v) >> 28) & 0x01) +#define LOGIN_ORB_RESERVED(v) (((v) >> 24) & 0x0f) +#define LOGIN_ORB_RECONNECT(v) (((v) >> 20) & 0x0f) +#define LOGIN_ORB_LUN(v) (((v) >> 0) & 0xffff) +#define LOGIN_ORB_PASSWORD_LENGTH(v) (((v) >> 16) & 0xffff) +#define LOGIN_ORB_RESPONSE_LENGTH(v) (((v) >> 0) & 0xffff) + +#define RECONNECT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff) +#define LOGOUT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff) + +#define CMDBLK_ORB_DIRECTION(v) (((v) >> 27) & 0x01) +#define CMDBLK_ORB_SPEED(v) (((v) >> 24) & 0x07) +#define CMDBLK_ORB_MAX_PAYLOAD(v) (((v) >> 20) & 0x0f) +#define 
CMDBLK_ORB_PG_TBL_PRESENT(v) (((v) >> 19) & 0x01) +#define CMDBLK_ORB_PG_SIZE(v) (((v) >> 16) & 0x07) +#define CMDBLK_ORB_DATA_SIZE(v) (((v) >> 0) & 0xffff) + +#define STATUS_BLOCK_SRC(v) (((v) & 0x03) << 30) +#define STATUS_BLOCK_RESP(v) (((v) & 0x03) << 28) +#define STATUS_BLOCK_DEAD(v) (((v) ? 1 : 0) << 27) +#define STATUS_BLOCK_LEN(v) (((v) & 0x07) << 24) +#define STATUS_BLOCK_SBP_STATUS(v) (((v) & 0xff) << 16) +#define STATUS_BLOCK_ORB_OFFSET_HIGH(v) (((v) & 0xffff) << 0) + +#define STATUS_SRC_ORB_CONTINUING 0 +#define STATUS_SRC_ORB_FINISHED 1 +#define STATUS_SRC_UNSOLICITED 2 + +#define STATUS_RESP_REQUEST_COMPLETE 0 +#define STATUS_RESP_TRANSPORT_FAILURE 1 +#define STATUS_RESP_ILLEGAL_REQUEST 2 +#define STATUS_RESP_VENDOR_DEPENDENT 3 + +#define SBP_STATUS_OK 0 +#define SBP_STATUS_REQ_TYPE_NOTSUPP 1 +#define SBP_STATUS_SPEED_NOTSUPP 2 +#define SBP_STATUS_PAGE_SIZE_NOTSUPP 3 +#define SBP_STATUS_ACCESS_DENIED 4 +#define SBP_STATUS_LUN_NOTSUPP 5 +#define SBP_STATUS_PAYLOAD_TOO_SMALL 6 +/* 7 is reserved */ +#define SBP_STATUS_RESOURCES_UNAVAIL 8 +#define SBP_STATUS_FUNCTION_REJECTED 9 +#define SBP_STATUS_LOGIN_ID_UNKNOWN 10 +#define SBP_STATUS_DUMMY_ORB_COMPLETE 11 +#define SBP_STATUS_REQUEST_ABORTED 12 +#define SBP_STATUS_UNSPECIFIED_ERROR 0xff + +#define AGENT_STATE_RESET 0 +#define AGENT_STATE_ACTIVE 1 +#define AGENT_STATE_SUSPENDED 2 +#define AGENT_STATE_DEAD 3 + +struct sbp2_pointer { + __be32 high; + __be32 low; +}; + +struct sbp_command_block_orb { + struct sbp2_pointer next_orb; + struct sbp2_pointer data_descriptor; + __be32 misc; + u8 command_block[12]; +}; + +struct sbp_page_table_entry { + __be16 segment_length; + __be16 segment_base_hi; + __be32 segment_base_lo; +}; + +struct sbp_management_orb { + struct sbp2_pointer ptr1; + struct sbp2_pointer ptr2; + __be32 misc; + __be32 length; + struct sbp2_pointer status_fifo; +}; + +struct sbp_status_block { + __be32 status; + __be32 orb_low; + u8 data[24]; +}; + +struct sbp_login_response_block { + __be32 misc; + struct sbp2_pointer command_block_agent; + __be32 reconnect_hold; +}; + +struct sbp_login_descriptor { + struct sbp_session *sess; + struct list_head link; + + u32 login_lun; + + u64 status_fifo_addr; + int exclusive; + u16 login_id; + + struct sbp_target_agent *tgt_agt; +}; + +struct sbp_session { + spinlock_t lock; + struct se_session *se_sess; + struct list_head login_list; + struct delayed_work maint_work; + + u64 guid; /* login_owner_EUI_64 */ + int node_id; /* login_owner_ID */ + + struct fw_card *card; + int generation; + int speed; + + int reconnect_hold; + u64 reconnect_expires; +}; + +struct sbp_tpg { + /* Target portal group tag for TCM */ + u16 tport_tpgt; + /* Pointer back to sbp_tport */ + struct sbp_tport *tport; + /* Returned by sbp_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct sbp_tport { + /* Target Unit Identifier (EUI-64) */ + u64 guid; + /* Target port name */ + char tport_name[SBP_NAMELEN]; + /* Returned by sbp_make_tport() */ + struct se_wwn tport_wwn; + + struct sbp_tpg *tpg; + + /* FireWire unit directory */ + struct fw_descriptor unit_directory; + + /* SBP Management Agent */ + struct sbp_management_agent *mgt_agt; + + /* Parameters */ + int enable; + s32 directory_id; + int mgt_orb_timeout; + int max_reconnect_timeout; + int max_logins_per_lun; +}; + +static inline u64 sbp2_pointer_to_addr(const struct sbp2_pointer *ptr) +{ + return (u64)(be32_to_cpu(ptr->high) & 0x0000ffff) << 32 | + (be32_to_cpu(ptr->low) & 0xfffffffc); +} + +static inline void addr_to_sbp2_pointer(u64 addr, 
struct sbp2_pointer *ptr)
+{
+ ptr->high = cpu_to_be32(addr >> 32);
+ ptr->low = cpu_to_be32(addr);
+}
+
+struct sbp_target_agent {
+ spinlock_t lock;
+ struct fw_address_handler handler;
+ struct sbp_login_descriptor *login;
+ int state;
+ struct work_struct work;
+ u64 orb_pointer;
+ bool doorbell;
+};
+
+struct sbp_target_request {
+ struct sbp_login_descriptor *login;
+ u64 orb_pointer;
+ struct sbp_command_block_orb orb;
+ struct sbp_status_block status;
+ struct work_struct work;
+
+ struct se_cmd se_cmd;
+ struct sbp_page_table_entry *pg_tbl;
+ void *cmd_buf;
+
+ unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
+struct sbp_management_agent {
+ spinlock_t lock;
+ struct sbp_tport *tport;
+ struct fw_address_handler handler;
+ int state;
+ struct work_struct work;
+ u64 orb_offset;
+ struct sbp_management_request *request;
+};
+
+struct sbp_management_request {
+ struct sbp_management_orb orb;
+ struct sbp_status_block status;
+ struct fw_card *card;
+ int generation;
+ int node_addr;
+ int speed;
+};
+
+#endif
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 0000000000..3372856319
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,2279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ * Filename: target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * (c) Copyright 2009-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_ua.h"
+
+static sense_reason_t core_alua_check_transition(int state, int valid,
+ int *primary, int explicit);
+static int core_alua_set_tg_pt_secondary_state(
+ struct se_lun *lun, int explicit, int offline);
+
+static char *core_alua_dump_state(int state);
+
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp);
+
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
+/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *map_mem;
+ unsigned char *buf;
+ u32 rd_len = 0, off;
+
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT REFERRALS allocation length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ off = 4;
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (list_empty(&dev->t10_alua.lba_map_list)) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ transport_kunmap_data_sg(cmd);
+
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ int desc_num 
= off + 3; + int pg_num; + + off += 4; + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_first_lba, &buf[off]); + off += 8; + if (cmd->data_length > off) + put_unaligned_be64(map->lba_map_last_lba, &buf[off]); + off += 8; + rd_len += 20; + pg_num = 0; + list_for_each_entry(map_mem, &map->lba_map_mem_list, + lba_map_mem_list) { + int alua_state = map_mem->lba_map_mem_alua_state; + int alua_pg_id = map_mem->lba_map_mem_alua_pg_id; + + if (cmd->data_length > off) + buf[off] = alua_state & 0x0f; + off += 2; + if (cmd->data_length > off) + buf[off] = (alua_pg_id >> 8) & 0xff; + off++; + if (cmd->data_length > off) + buf[off] = (alua_pg_id & 0xff); + off++; + rd_len += 4; + pg_num++; + } + if (cmd->data_length > desc_num) + buf[desc_num] = pg_num; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + + /* + * Set the RETURN DATA LENGTH set in the header of the DataIN Payload + */ + put_unaligned_be16(rd_len, &buf[2]); + + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +/* + * REPORT_TARGET_PORT_GROUPS + * + * See spc4r17 section 6.27 + */ +sense_reason_t +target_emulate_report_target_port_groups(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct se_lun *lun; + unsigned char *buf; + u32 rd_len = 0, off; + int ext_hdr = (cmd->t_task_cdb[1] & 0x20); + + /* + * Skip over RESERVED area to first Target port group descriptor + * depending on the PARAMETER DATA FORMAT type.. + */ + if (ext_hdr != 0) + off = 8; + else + off = 4; + + if (cmd->data_length < off) { + pr_warn("REPORT TARGET PORT GROUPS allocation length %u too" + " small for %s header\n", cmd->data_length, + (ext_hdr) ? "extended" : "normal"); + return TCM_INVALID_CDB_FIELD; + } + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + /* Skip empty port groups */ + if (!tg_pt_gp->tg_pt_gp_members) + continue; + /* + * Check if the Target port group and Target port descriptor list + * based on tg_pt_gp_members count will fit into the response payload. + * Otherwise, bump rd_len to let the initiator know we have exceeded + * the allocation length and the response is truncated. + */ + if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > + cmd->data_length) { + rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); + continue; + } + /* + * PREF: Preferred target port bit, determine if this + * bit should be set for port group. 
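+ *
+ * A set PREF bit (bit 7 of the first descriptor byte, the 0x80
+ * below) tells the application client that this group is the
+ * preferred target port group for accessing the logical unit.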
+ */
+ if (tg_pt_gp->tg_pt_gp_pref)
+ buf[off] = 0x80;
+ /*
+ * Set the ASYMMETRIC ACCESS State
+ */
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
+ /*
+ * Set supported ASYMMETRIC ACCESS State bits
+ */
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
+ /*
+ * TARGET PORT GROUP
+ */
+ put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
+ off += 2;
+
+ off++; /* Skip over Reserved */
+ /*
+ * STATUS CODE
+ */
+ buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+ /*
+ * Vendor Specific field
+ */
+ buf[off++] = 0x00;
+ /*
+ * TARGET PORT COUNT
+ */
+ buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+ rd_len += 8;
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
+ /*
+ * Start Target Port descriptor format
+ *
+ * See spc4r17 section 6.2.7 Table 247
+ */
+ off += 2; /* Skip over Obsolete */
+ /*
+ * Set RELATIVE TARGET PORT IDENTIFIER
+ */
+ put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
+ off += 2;
+ rd_len += 4;
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ /*
+ * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+ */
+ put_unaligned_be32(rd_len, &buf[0]);
+
+ /*
+ * Fill in the Extended header parameter data format if requested
+ */
+ if (ext_hdr != 0) {
+ buf[4] = 0x10;
+ /*
+ * Set the implicit transition time (in seconds) for the application
+ * client to use as a base for its transition timeout value.
+ *
+ * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
+ * this CDB was received upon to determine this value individually
+ * for the ALUA target port group.
+ */
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
+ if (tg_pt_gp)
+ buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
+ rcu_read_unlock();
+ }
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
+ return 0;
+}
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_lun *l_lun = cmd->se_lun;
+ struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
+ struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+ unsigned char *buf;
+ unsigned char *ptr;
+ sense_reason_t rc = TCM_NO_SENSE;
+ u32 len = 4; /* Skip over RESERVED area in header */
+ int alua_access_state, primary = 0, valid_states;
+ u16 tg_pt_id, rtpi;
+
+ if (cmd->data_length < 4) {
+ pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_PARAMETER_LIST;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ /*
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * for the local tg_pt_gp. 
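+ *
+ * The group must advertise TPGS_EXPLICIT_ALUA in its access type,
+ * otherwise the STPG is rejected below as an unsupported opcode.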
+ */ + rcu_read_lock(); + l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp); + if (!l_tg_pt_gp) { + rcu_read_unlock(); + pr_err("Unable to access l_lun->tg_pt_gp\n"); + rc = TCM_UNSUPPORTED_SCSI_OPCODE; + goto out; + } + + if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { + rcu_read_unlock(); + pr_debug("Unable to process SET_TARGET_PORT_GROUPS" + " while TPGS_EXPLICIT_ALUA is disabled\n"); + rc = TCM_UNSUPPORTED_SCSI_OPCODE; + goto out; + } + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; + rcu_read_unlock(); + + ptr = &buf[4]; /* Skip over RESERVED area in header */ + + while (len < cmd->data_length) { + bool found = false; + alua_access_state = (ptr[0] & 0x0f); + /* + * Check the received ALUA access state, and determine if + * the state is a primary or secondary target port asymmetric + * access state. + */ + rc = core_alua_check_transition(alua_access_state, valid_states, + &primary, 1); + if (rc) { + /* + * If the SET TARGET PORT GROUPS attempts to establish + * an invalid combination of target port asymmetric + * access states or attempts to establish an + * unsupported target port asymmetric access state, + * then the command shall be terminated with CHECK + * CONDITION status, with the sense key set to ILLEGAL + * REQUEST, and the additional sense code set to INVALID + * FIELD IN PARAMETER LIST. + */ + goto out; + } + + /* + * If the ASYMMETRIC ACCESS STATE field (see table 267) + * specifies a primary target port asymmetric access state, + * then the TARGET PORT GROUP OR TARGET PORT field specifies + * a primary target port group for which the primary target + * port asymmetric access state shall be changed. If the + * ASYMMETRIC ACCESS STATE field specifies a secondary target + * port asymmetric access state, then the TARGET PORT GROUP OR + * TARGET PORT field specifies the relative target port + * identifier (see 3.1.120) of the target port for which the + * secondary target port asymmetric access state shall be + * changed. + */ + if (primary) { + tg_pt_id = get_unaligned_be16(ptr + 2); + /* + * Locate the matching target port group ID from + * the global tg_pt_gp list + */ + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, + &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + + if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) + continue; + + atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); + + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + if (!core_alua_do_port_transition(tg_pt_gp, + dev, l_lun, nacl, + alua_access_state, 1)) + found = true; + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); + break; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + } else { + struct se_lun *lun; + + /* + * Extract the RELATIVE TARGET PORT IDENTIFIER to identify + * the Target Port in question for the incoming + * SET_TARGET_PORT_GROUPS op. + */ + rtpi = get_unaligned_be16(ptr + 2); + /* + * Locate the matching relative target port identifier + * for the struct se_device storage object. 
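+ *
+ * Note that dev->se_port_lock is dropped across the secondary state
+ * change below (see the XXX comment), so the dev_sep_list may change
+ * while the transition runs; the loop re-takes the lock and breaks
+ * out once the matching RTPI has been handled.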
+ */ + spin_lock(&dev->se_port_lock); + list_for_each_entry(lun, &dev->dev_sep_list, + lun_dev_link) { + if (lun->lun_tpg->tpg_rtpi != rtpi) + continue; + + // XXX: racy unlock + spin_unlock(&dev->se_port_lock); + + if (!core_alua_set_tg_pt_secondary_state( + lun, 1, 1)) + found = true; + + spin_lock(&dev->se_port_lock); + break; + } + spin_unlock(&dev->se_port_lock); + } + + if (!found) { + rc = TCM_INVALID_PARAMETER_LIST; + goto out; + } + + ptr += 4; + len += 4; + } + +out: + transport_kunmap_data_sg(cmd); + if (!rc) + target_complete_cmd(cmd, SAM_STAT_GOOD); + return rc; +} + +static inline void core_alua_state_nonoptimized( + struct se_cmd *cmd, + unsigned char *cdb, + int nonop_delay_msecs) +{ + /* + * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked + * later to determine if processing of this cmd needs to be + * temporarily delayed for the Active/NonOptimized primary access state. + */ + cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; + cmd->alua_nonop_delay = nonop_delay_msecs; +} + +static inline sense_reason_t core_alua_state_lba_dependent( + struct se_cmd *cmd, + u16 tg_pt_gp_id) +{ + struct se_device *dev = cmd->se_dev; + u64 segment_size, segment_mult, sectors, lba; + + /* Only need to check for cdb actually containing LBAs */ + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)) + return 0; + + spin_lock(&dev->t10_alua.lba_map_lock); + segment_size = dev->t10_alua.lba_map_segment_size; + segment_mult = dev->t10_alua.lba_map_segment_multiplier; + sectors = cmd->data_length / dev->dev_attrib.block_size; + + lba = cmd->t_task_lba; + while (lba < cmd->t_task_lba + sectors) { + struct t10_alua_lba_map *cur_map = NULL, *map; + struct t10_alua_lba_map_member *map_mem; + + list_for_each_entry(map, &dev->t10_alua.lba_map_list, + lba_map_list) { + u64 start_lba, last_lba; + u64 first_lba = map->lba_map_first_lba; + + if (segment_mult) { + u64 tmp = lba; + start_lba = do_div(tmp, segment_size * segment_mult); + + last_lba = first_lba + segment_size - 1; + if (start_lba >= first_lba && + start_lba <= last_lba) { + lba += segment_size; + cur_map = map; + break; + } + } else { + last_lba = map->lba_map_last_lba; + if (lba >= first_lba && lba <= last_lba) { + lba = last_lba + 1; + cur_map = map; + break; + } + } + } + if (!cur_map) { + spin_unlock(&dev->t10_alua.lba_map_lock); + return TCM_ALUA_TG_PT_UNAVAILABLE; + } + list_for_each_entry(map_mem, &cur_map->lba_map_mem_list, + lba_map_mem_list) { + if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id) + continue; + switch(map_mem->lba_map_mem_alua_state) { + case ALUA_ACCESS_STATE_STANDBY: + spin_unlock(&dev->t10_alua.lba_map_lock); + return TCM_ALUA_TG_PT_STANDBY; + case ALUA_ACCESS_STATE_UNAVAILABLE: + spin_unlock(&dev->t10_alua.lba_map_lock); + return TCM_ALUA_TG_PT_UNAVAILABLE; + default: + break; + } + } + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return 0; +} + +static inline sense_reason_t core_alua_state_standby( + struct se_cmd *cmd, + unsigned char *cdb) +{ + /* + * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by + * spc4r17 section 5.9.2.4.4 + */ + switch (cdb[0]) { + case INQUIRY: + case LOG_SELECT: + case LOG_SENSE: + case MODE_SELECT: + case MODE_SENSE: + case REPORT_LUNS: + case RECEIVE_DIAGNOSTIC: + case SEND_DIAGNOSTIC: + case READ_CAPACITY: + return 0; + case SERVICE_ACTION_IN_16: + switch (cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + return 0; + default: + return TCM_ALUA_TG_PT_STANDBY; + } + case MAINTENANCE_IN: + switch (cdb[1] & 0x1f) { + case MI_REPORT_TARGET_PGS: + return 0; + default: + return 
TCM_ALUA_TG_PT_STANDBY;
+ }
+ case MAINTENANCE_OUT:
+ switch (cdb[1]) {
+ case MO_SET_TARGET_PGS:
+ return 0;
+ default:
+ return TCM_ALUA_TG_PT_STANDBY;
+ }
+ case REQUEST_SENSE:
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ return TCM_ALUA_TG_PT_STANDBY;
+ }
+
+ return 0;
+}
+
+static inline sense_reason_t core_alua_state_unavailable(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+ * spc4r17 section 5.9.2.4.5
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ return 0;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
+ }
+ case MAINTENANCE_OUT:
+ switch (cdb[1]) {
+ case MO_SET_TARGET_PGS:
+ return 0;
+ default:
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
+ }
+ case REQUEST_SENSE:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
+ }
+
+ return 0;
+}
+
+static inline sense_reason_t core_alua_state_transition(
+ struct se_cmd *cmd,
+ unsigned char *cdb)
+{
+ /*
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+ * spc4r17 section 5.9.2.5
+ */
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
+ return 0;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+ return TCM_ALUA_STATE_TRANSITION;
+ }
+ case REQUEST_SENSE:
+ case READ_BUFFER:
+ case WRITE_BUFFER:
+ return 0;
+ default:
+ return TCM_ALUA_STATE_TRANSITION;
+ }
+
+ return 0;
+}
+
+/*
+ * Returns TCM_NO_SENSE when the command may proceed in the current ALUA
+ * access state, a TCM_ALUA_* sense_reason_t when the LUN is not accessible
+ * (check condition/not ready), or TCM_INVALID_CDB_FIELD for an unknown
+ * access state.
+ */
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
+ struct se_lun *lun = cmd->se_lun;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ int out_alua_state, nonop_delay_msecs;
+ u16 tg_pt_gp_id;
+ sense_reason_t rc = TCM_NO_SENSE;
+
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ return 0;
+
+ /*
+ * First, check for a struct se_port specific secondary ALUA target port
+ * access state: OFFLINE
+ */
+ if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
+ pr_debug("ALUA: Got secondary offline status for local"
+ " target port\n");
+ return TCM_ALUA_OFFLINE;
+ }
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
+ if (!tg_pt_gp) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+ nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+ rcu_read_unlock();
+ /*
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
+ * statement so the compiler knows explicitly to check this case first.
+ * For the Optimized ALUA access state case, we want to process the
+ * incoming fabric cmd ASAP.. 
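+ *
+ * All remaining primary states are dispatched through the switch
+ * below to the per-state CDB filters implemented above.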
+ */ + if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED) + return 0; + + switch (out_alua_state) { + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs); + break; + case ALUA_ACCESS_STATE_STANDBY: + rc = core_alua_state_standby(cmd, cdb); + break; + case ALUA_ACCESS_STATE_UNAVAILABLE: + rc = core_alua_state_unavailable(cmd, cdb); + break; + case ALUA_ACCESS_STATE_TRANSITION: + rc = core_alua_state_transition(cmd, cdb); + break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id); + break; + /* + * OFFLINE is a secondary ALUA target port group access state, that is + * handled above with struct se_lun->lun_tg_pt_secondary_offline=1 + */ + case ALUA_ACCESS_STATE_OFFLINE: + default: + pr_err("Unknown ALUA access state: 0x%02x\n", + out_alua_state); + rc = TCM_INVALID_CDB_FIELD; + } + + if (rc && rc != TCM_INVALID_CDB_FIELD) { + pr_debug("[%s]: ALUA TG Port not available, " + "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n", + cmd->se_tfo->fabric_name, rc); + } + + return rc; +} + +/* + * Check implicit and explicit ALUA state change request. + */ +static sense_reason_t +core_alua_check_transition(int state, int valid, int *primary, int explicit) +{ + /* + * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are + * defined as primary target port asymmetric access states. + */ + switch (state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + if (!(valid & ALUA_AO_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + if (!(valid & ALUA_AN_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_STANDBY: + if (!(valid & ALUA_S_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_UNAVAILABLE: + if (!(valid & ALUA_U_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + if (!(valid & ALUA_LBD_SUP)) + goto not_supported; + *primary = 1; + break; + case ALUA_ACCESS_STATE_OFFLINE: + /* + * OFFLINE state is defined as a secondary target port + * asymmetric access state. + */ + if (!(valid & ALUA_O_SUP)) + goto not_supported; + *primary = 0; + break; + case ALUA_ACCESS_STATE_TRANSITION: + if (!(valid & ALUA_T_SUP) || explicit) + /* + * Transitioning is set internally and by tcmu daemon, + * and cannot be selected through a STPG. 
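+ *
+ * An explicit STPG requesting the Transition state is therefore
+ * rejected here even when ALUA_T_SUP is advertised.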
+ */ + goto not_supported; + *primary = 0; + break; + default: + pr_err("Unknown ALUA access state: 0x%02x\n", state); + return TCM_INVALID_PARAMETER_LIST; + } + + return 0; + +not_supported: + pr_err("ALUA access state %s not supported", + core_alua_dump_state(state)); + return TCM_INVALID_PARAMETER_LIST; +} + +static char *core_alua_dump_state(int state) +{ + switch (state) { + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: + return "Active/Optimized"; + case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: + return "Active/NonOptimized"; + case ALUA_ACCESS_STATE_LBA_DEPENDENT: + return "LBA Dependent"; + case ALUA_ACCESS_STATE_STANDBY: + return "Standby"; + case ALUA_ACCESS_STATE_UNAVAILABLE: + return "Unavailable"; + case ALUA_ACCESS_STATE_OFFLINE: + return "Offline"; + case ALUA_ACCESS_STATE_TRANSITION: + return "Transitioning"; + default: + return "Unknown"; + } + + return NULL; +} + +char *core_alua_dump_status(int status) +{ + switch (status) { + case ALUA_STATUS_NONE: + return "None"; + case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: + return "Altered by Explicit STPG"; + case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: + return "Altered by Implicit ALUA"; + default: + return "Unknown"; + } + + return NULL; +} + +/* + * Used by fabric modules to determine when we need to delay processing + * for the Active/NonOptimized paths.. + */ +int core_alua_check_nonop_delay( + struct se_cmd *cmd) +{ + if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) + return 0; + /* + * The ALUA Active/NonOptimized access state delay can be disabled + * in via configfs with a value of zero + */ + if (!cmd->alua_nonop_delay) + return 0; + /* + * struct se_cmd->alua_nonop_delay gets set by a target port group + * defined interval in core_alua_state_nonoptimized() + */ + msleep_interruptible(cmd->alua_nonop_delay); + return 0; +} +EXPORT_SYMBOL(core_alua_check_nonop_delay); + +static int core_alua_write_tpg_metadata( + const char *path, + unsigned char *md_buf, + u32 md_buf_len) +{ + struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); + loff_t pos = 0; + int ret; + + if (IS_ERR(file)) { + pr_err("filp_open(%s) for ALUA metadata failed\n", path); + return -ENODEV; + } + ret = kernel_write(file, md_buf, md_buf_len, &pos); + if (ret < 0) + pr_err("Error writing ALUA metadata file: %s\n", path); + fput(file); + return (ret < 0) ? 
-EIO : 0; +} + +static int core_alua_update_tpg_primary_metadata( + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + unsigned char *md_buf; + struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; + char *path; + int len, rc; + + lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex); + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + return -ENOMEM; + } + + len = snprintf(md_buf, ALUA_MD_BUF_LEN, + "tg_pt_gp_id=%hu\n" + "alua_access_state=0x%02x\n" + "alua_access_status=0x%02x\n", + tg_pt_gp->tg_pt_gp_id, + tg_pt_gp->tg_pt_gp_alua_access_state, + tg_pt_gp->tg_pt_gp_alua_access_status); + + rc = -ENOMEM; + path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root, + &wwn->unit_serial[0], + config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); + if (path) { + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(path); + } + kfree(md_buf); + return rc; +} + +static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_dev_entry *se_deve; + struct se_lun *lun; + struct se_lun_acl *lacl; + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, + lun_tg_pt_gp_link) { + /* + * After an implicit target port asymmetric access state + * change, a device server shall establish a unit attention + * condition for the initiator port associated with every I_T + * nexus with the additional sense code set to ASYMMETRIC + * ACCESS STATE CHANGED. + * + * After an explicit target port asymmetric access state + * change, a device server shall establish a unit attention + * condition with the additional sense code set to ASYMMETRIC + * ACCESS STATE CHANGED for the initiator port associated with + * every I_T nexus other than the I_T nexus on which the SET + * TARGET PORT GROUPS command + */ + if (!percpu_ref_tryget_live(&lun->lun_ref)) + continue; + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + + spin_lock(&lun->lun_deve_lock); + list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) { + lacl = se_deve->se_lun_acl; + + /* + * spc4r37 p.242: + * After an explicit target port asymmetric access + * state change, a device server shall establish a + * unit attention condition with the additional sense + * code set to ASYMMETRIC ACCESS STATE CHANGED for + * the initiator port associated with every I_T nexus + * other than the I_T nexus on which the SET TARGET + * PORT GROUPS command was received. 
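+ *
+ * The two checks below implement that exclusion by comparing
+ * against the lun and nacl recorded when the transition started.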
+ */
+ if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_lun == lun))
+ continue;
+
+ /*
+ * se_deve->se_lun_acl pointer may be NULL for an
+ * entry created without explicit Node+MappedLUN ACLs
+ */
+ if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
+ continue;
+
+ core_scsi3_ua_allocate(se_deve, 0x2A,
+ ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+ }
+ spin_unlock(&lun->lun_deve_lock);
+
+ spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+ percpu_ref_put(&lun->lun_ref);
+ }
+ spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int new_state,
+ int explicit)
+{
+ int prev_state;
+
+ mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ /* Nothing to be done here */
+ if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ return 0;
+ }
+
+ if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ return -EAGAIN;
+ }
+
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+ tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+ core_alua_queue_state_change_ua(tg_pt_gp);
+
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ return 0;
+ }
+
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ /*
+ * Set the current primary ALUA access state to the requested new state
+ */
+ tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
+
+ /*
+ * Update the ALUA metadata buf that has been allocated in
+ * core_alua_do_port_transition(); this metadata will be written
+ * to struct file.
+ *
+ * Note that there is the case where we do not want to update the
+ * metadata when the saved metadata is being parsed in userspace
+ * when setting the existing port access state and access status.
+ *
+ * Also note that the failure to write out the ALUA metadata to
+ * struct file does NOT affect the actual ALUA transition.
+ */
+ if (tg_pt_gp->tg_pt_gp_write_metadata) {
+ core_alua_update_tpg_primary_metadata(tg_pt_gp);
+ }
+
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ " from primary access state %s to %s\n", (explicit) ? 
"explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(prev_state), + core_alua_dump_state(new_state)); + + core_alua_queue_state_change_ua(tg_pt_gp); + + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); + return 0; +} + +int core_alua_do_port_transition( + struct t10_alua_tg_pt_gp *l_tg_pt_gp, + struct se_device *l_dev, + struct se_lun *l_lun, + struct se_node_acl *l_nacl, + int new_state, + int explicit) +{ + struct se_device *dev; + struct t10_alua_lu_gp *lu_gp; + struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; + struct t10_alua_tg_pt_gp *tg_pt_gp; + int primary, valid_states, rc = 0; + + if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) + return -ENODEV; + + valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; + if (core_alua_check_transition(new_state, valid_states, &primary, + explicit) != 0) + return -EINVAL; + + local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; + spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); + lu_gp = local_lu_gp_mem->lu_gp; + atomic_inc(&lu_gp->lu_gp_ref_cnt); + spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); + /* + * For storage objects that are members of the 'default_lu_gp', + * we only do transition on the passed *l_tp_pt_gp, and not + * on all of the matching target port groups IDs in default_lu_gp. + */ + if (!lu_gp->lu_gp_id) { + /* + * core_alua_do_transition_tg_pt() will always return + * success. + */ + l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; + l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; + rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, + new_state, explicit); + atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); + return rc; + } + /* + * For all other LU groups aside from 'default_lu_gp', walk all of + * the associated storage objects looking for a matching target port + * group ID from the local target port group. + */ + spin_lock(&lu_gp->lu_gp_lock); + list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, + lu_gp_mem_list) { + + dev = lu_gp_mem->lu_gp_mem_dev; + atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); + spin_unlock(&lu_gp->lu_gp_lock); + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, + &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + /* + * If the target behavior port asymmetric access state + * is changed for any target port group accessible via + * a logical unit within a LU group, the target port + * behavior group asymmetric access states for the same + * target port group accessible via other logical units + * in that LU group will also change. + */ + if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) + continue; + + if (l_tg_pt_gp == tg_pt_gp) { + tg_pt_gp->tg_pt_gp_alua_lun = l_lun; + tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; + } else { + tg_pt_gp->tg_pt_gp_alua_lun = NULL; + tg_pt_gp->tg_pt_gp_alua_nacl = NULL; + } + atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + /* + * core_alua_do_transition_tg_pt() will always return + * success. 
+ */ + rc = core_alua_do_transition_tg_pt(tg_pt_gp, + new_state, explicit); + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); + if (rc) + break; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + spin_lock(&lu_gp->lu_gp_lock); + atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); + } + spin_unlock(&lu_gp->lu_gp_lock); + + if (!rc) { + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" + " Group IDs: %hu %s transition to primary state: %s\n", + config_item_name(&lu_gp->lu_gp_group.cg_item), + l_tg_pt_gp->tg_pt_gp_id, + (explicit) ? "explicit" : "implicit", + core_alua_dump_state(new_state)); + } + + atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); + return rc; +} + +static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun) +{ + struct se_portal_group *se_tpg = lun->lun_tpg; + unsigned char *md_buf; + char *path; + int len, rc; + + mutex_lock(&lun->lun_tg_pt_md_mutex); + + md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); + rc = -ENOMEM; + goto out_unlock; + } + + len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" + "alua_tg_pt_status=0x%02x\n", + atomic_read(&lun->lun_tg_pt_secondary_offline), + lun->lun_tg_pt_secondary_stat); + + if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) { + path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu", + db_root, se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg), + lun->unpacked_lun); + } else { + path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu", + db_root, se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), + lun->unpacked_lun); + } + if (!path) { + rc = -ENOMEM; + goto out_free; + } + + rc = core_alua_write_tpg_metadata(path, md_buf, len); + kfree(path); +out_free: + kfree(md_buf); +out_unlock: + mutex_unlock(&lun->lun_tg_pt_md_mutex); + return rc; +} + +static int core_alua_set_tg_pt_secondary_state( + struct se_lun *lun, + int explicit, + int offline) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + int trans_delay_msecs; + + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); + if (!tg_pt_gp) { + rcu_read_unlock(); + pr_err("Unable to complete secondary state" + " transition\n"); + return -EINVAL; + } + trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; + /* + * Set the secondary ALUA target port access state to OFFLINE + * or release the previously secondary state for struct se_lun + */ + if (offline) + atomic_set(&lun->lun_tg_pt_secondary_offline, 1); + else + atomic_set(&lun->lun_tg_pt_secondary_offline, 0); + + lun->lun_tg_pt_secondary_stat = (explicit) ? + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; + + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" + " to secondary access state: %s\n", (explicit) ? "explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); + + rcu_read_unlock(); + /* + * Do the optional transition delay after we set the secondary + * ALUA access state. 
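+ *
+ * trans_delay_msecs was sampled under rcu_read_lock above, as the
+ * tg_pt_gp reference must not be used once the RCU read side is
+ * dropped.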
+ */ + if (trans_delay_msecs != 0) + msleep_interruptible(trans_delay_msecs); + /* + * See if we need to update the ALUA fabric port metadata for + * secondary state and status + */ + if (lun->lun_tg_pt_secondary_write_md) + core_alua_update_tpg_secondary_metadata(lun); + + return 0; +} + +struct t10_alua_lba_map * +core_alua_allocate_lba_map(struct list_head *list, + u64 first_lba, u64 last_lba) +{ + struct t10_alua_lba_map *lba_map; + + lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL); + if (!lba_map) { + pr_err("Unable to allocate struct t10_alua_lba_map\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&lba_map->lba_map_mem_list); + lba_map->lba_map_first_lba = first_lba; + lba_map->lba_map_last_lba = last_lba; + + list_add_tail(&lba_map->lba_map_list, list); + return lba_map; +} + +int +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map, + int pg_id, int state) +{ + struct t10_alua_lba_map_member *lba_map_mem; + + list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list, + lba_map_mem_list) { + if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { + pr_err("Duplicate pg_id %d in lba_map\n", pg_id); + return -EINVAL; + } + } + + lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL); + if (!lba_map_mem) { + pr_err("Unable to allocate struct t10_alua_lba_map_mem\n"); + return -ENOMEM; + } + lba_map_mem->lba_map_mem_alua_state = state; + lba_map_mem->lba_map_mem_alua_pg_id = pg_id; + + list_add_tail(&lba_map_mem->lba_map_mem_list, + &lba_map->lba_map_mem_list); + return 0; +} + +void +core_alua_free_lba_map(struct list_head *lba_list) +{ + struct t10_alua_lba_map *lba_map, *lba_map_tmp; + struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp; + + list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list, + lba_map_list) { + list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp, + &lba_map->lba_map_mem_list, + lba_map_mem_list) { + list_del(&lba_map_mem->lba_map_mem_list); + kmem_cache_free(t10_alua_lba_map_mem_cache, + lba_map_mem); + } + list_del(&lba_map->lba_map_list); + kmem_cache_free(t10_alua_lba_map_cache, lba_map); + } +} + +void +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list, + int segment_size, int segment_mult) +{ + struct list_head old_lba_map_list; + struct t10_alua_tg_pt_gp *tg_pt_gp; + int activate = 0, supported; + + INIT_LIST_HEAD(&old_lba_map_list); + spin_lock(&dev->t10_alua.lba_map_lock); + dev->t10_alua.lba_map_segment_size = segment_size; + dev->t10_alua.lba_map_segment_multiplier = segment_mult; + list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list); + if (lba_map_list) { + list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list); + activate = 1; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + supported = tg_pt_gp->tg_pt_gp_alua_supported_states; + if (activate) + supported |= ALUA_LBD_SUP; + else + supported &= ~ALUA_LBD_SUP; + tg_pt_gp->tg_pt_gp_alua_supported_states = supported; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + core_alua_free_lba_map(&old_lba_map_list); +} + +struct t10_alua_lu_gp * +core_alua_allocate_lu_gp(const char *name, int def_group) +{ + struct t10_alua_lu_gp *lu_gp; + + lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); + if (!lu_gp) { + pr_err("Unable to allocate struct t10_alua_lu_gp\n"); + return ERR_PTR(-ENOMEM); + } + 
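/*
+ * The group was allocated with kmem_cache_zalloc(), so only the
+ * list heads, lock and reference count need explicit setup here.
+ */
+ 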
INIT_LIST_HEAD(&lu_gp->lu_gp_node); + INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); + spin_lock_init(&lu_gp->lu_gp_lock); + atomic_set(&lu_gp->lu_gp_ref_cnt, 0); + + if (def_group) { + lu_gp->lu_gp_id = alua_lu_gps_counter++; + lu_gp->lu_gp_valid_id = 1; + alua_lu_gps_count++; + } + + return lu_gp; +} + +int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) +{ + struct t10_alua_lu_gp *lu_gp_tmp; + u16 lu_gp_id_tmp; + /* + * The lu_gp->lu_gp_id may only be set once.. + */ + if (lu_gp->lu_gp_valid_id) { + pr_warn("ALUA LU Group already has a valid ID," + " ignoring request\n"); + return -EINVAL; + } + + spin_lock(&lu_gps_lock); + if (alua_lu_gps_count == 0x0000ffff) { + pr_err("Maximum ALUA alua_lu_gps_count:" + " 0x0000ffff reached\n"); + spin_unlock(&lu_gps_lock); + kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); + return -ENOSPC; + } +again: + lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : + alua_lu_gps_counter++; + + list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { + if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { + if (!lu_gp_id) + goto again; + + pr_warn("ALUA Logical Unit Group ID: %hu" + " already exists, ignoring request\n", + lu_gp_id); + spin_unlock(&lu_gps_lock); + return -EINVAL; + } + } + + lu_gp->lu_gp_id = lu_gp_id_tmp; + lu_gp->lu_gp_valid_id = 1; + list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); + alua_lu_gps_count++; + spin_unlock(&lu_gps_lock); + + return 0; +} + +static struct t10_alua_lu_gp_member * +core_alua_allocate_lu_gp_mem(struct se_device *dev) +{ + struct t10_alua_lu_gp_member *lu_gp_mem; + + lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); + if (!lu_gp_mem) { + pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); + spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); + atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); + + lu_gp_mem->lu_gp_mem_dev = dev; + dev->dev_alua_lu_gp_mem = lu_gp_mem; + + return lu_gp_mem; +} + +void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) +{ + struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; + /* + * Once we have reached this point, config_item_put() has + * already been called from target_core_alua_drop_lu_gp(). + * + * Here, we remove the *lu_gp from the global list so that + * no associations can be made while we are releasing + * struct t10_alua_lu_gp. + */ + spin_lock(&lu_gps_lock); + list_del(&lu_gp->lu_gp_node); + alua_lu_gps_count--; + spin_unlock(&lu_gps_lock); + /* + * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() + * in target_core_configfs.c:target_core_store_alua_lu_gp() to be + * released with core_alua_put_lu_gp_from_name() + */ + while (atomic_read(&lu_gp->lu_gp_ref_cnt)) + cpu_relax(); + /* + * Release reference to struct t10_alua_lu_gp * from all associated + * struct se_device. + */ + spin_lock(&lu_gp->lu_gp_lock); + list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, + &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { + if (lu_gp_mem->lu_gp_assoc) { + list_del(&lu_gp_mem->lu_gp_mem_list); + lu_gp->lu_gp_members--; + lu_gp_mem->lu_gp_assoc = 0; + } + spin_unlock(&lu_gp->lu_gp_lock); + /* + * + * lu_gp_mem is associated with a single + * struct se_device->dev_alua_lu_gp_mem, and is released when + * struct se_device is released via core_alua_free_lu_gp_mem(). + * + * If the passed lu_gp does NOT match the default_lu_gp, assume + * we want to re-associate a given lu_gp_mem with default_lu_gp. 
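+ *
+ * (When the group being released is default_lu_gp itself, the
+ * member's lu_gp back pointer is simply cleared instead.)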
+ */ + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + if (lu_gp != default_lu_gp) + __core_alua_attach_lu_gp_mem(lu_gp_mem, + default_lu_gp); + else + lu_gp_mem->lu_gp = NULL; + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + spin_lock(&lu_gp->lu_gp_lock); + } + spin_unlock(&lu_gp->lu_gp_lock); + + kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); +} + +void core_alua_free_lu_gp_mem(struct se_device *dev) +{ + struct t10_alua_lu_gp *lu_gp; + struct t10_alua_lu_gp_member *lu_gp_mem; + + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!lu_gp_mem) + return; + + while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) + cpu_relax(); + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if (lu_gp) { + spin_lock(&lu_gp->lu_gp_lock); + if (lu_gp_mem->lu_gp_assoc) { + list_del(&lu_gp_mem->lu_gp_mem_list); + lu_gp->lu_gp_members--; + lu_gp_mem->lu_gp_assoc = 0; + } + spin_unlock(&lu_gp->lu_gp_lock); + lu_gp_mem->lu_gp = NULL; + } + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); +} + +struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) +{ + struct t10_alua_lu_gp *lu_gp; + struct config_item *ci; + + spin_lock(&lu_gps_lock); + list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { + if (!lu_gp->lu_gp_valid_id) + continue; + ci = &lu_gp->lu_gp_group.cg_item; + if (!strcmp(config_item_name(ci), name)) { + atomic_inc(&lu_gp->lu_gp_ref_cnt); + spin_unlock(&lu_gps_lock); + return lu_gp; + } + } + spin_unlock(&lu_gps_lock); + + return NULL; +} + +void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) +{ + spin_lock(&lu_gps_lock); + atomic_dec(&lu_gp->lu_gp_ref_cnt); + spin_unlock(&lu_gps_lock); +} + +/* + * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock + */ +void __core_alua_attach_lu_gp_mem( + struct t10_alua_lu_gp_member *lu_gp_mem, + struct t10_alua_lu_gp *lu_gp) +{ + spin_lock(&lu_gp->lu_gp_lock); + lu_gp_mem->lu_gp = lu_gp; + lu_gp_mem->lu_gp_assoc = 1; + list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); + lu_gp->lu_gp_members++; + spin_unlock(&lu_gp->lu_gp_lock); +} + +/* + * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock + */ +void __core_alua_drop_lu_gp_mem( + struct t10_alua_lu_gp_member *lu_gp_mem, + struct t10_alua_lu_gp *lu_gp) +{ + spin_lock(&lu_gp->lu_gp_lock); + list_del(&lu_gp_mem->lu_gp_mem_list); + lu_gp_mem->lu_gp = NULL; + lu_gp_mem->lu_gp_assoc = 0; + lu_gp->lu_gp_members--; + spin_unlock(&lu_gp->lu_gp_lock); +} + +struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, + const char *name, int def_group) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + + tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); + if (!tg_pt_gp) { + pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); + return NULL; + } + INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); + INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); + mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex); + spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); + atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); + tg_pt_gp->tg_pt_gp_dev = dev; + tg_pt_gp->tg_pt_gp_alua_access_state = + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; + /* + * Enable both explicit and implicit ALUA support by default + */ + tg_pt_gp->tg_pt_gp_alua_access_type = + TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA; + /* + * Set the default Active/NonOptimized Delay in milliseconds + */ + tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; + tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; + tg_pt_gp->tg_pt_gp_implicit_trans_secs = 
ALUA_DEFAULT_IMPLICIT_TRANS_SECS; + + /* + * Enable all supported states + */ + tg_pt_gp->tg_pt_gp_alua_supported_states = + ALUA_T_SUP | ALUA_O_SUP | + ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP; + + if (def_group) { + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + tg_pt_gp->tg_pt_gp_id = + dev->t10_alua.alua_tg_pt_gps_counter++; + tg_pt_gp->tg_pt_gp_valid_id = 1; + dev->t10_alua.alua_tg_pt_gps_count++; + list_add_tail(&tg_pt_gp->tg_pt_gp_list, + &dev->t10_alua.tg_pt_gps_list); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + } + + return tg_pt_gp; +} + +int core_alua_set_tg_pt_gp_id( + struct t10_alua_tg_pt_gp *tg_pt_gp, + u16 tg_pt_gp_id) +{ + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; + u16 tg_pt_gp_id_tmp; + + /* + * The tg_pt_gp->tg_pt_gp_id may only be set once.. + */ + if (tg_pt_gp->tg_pt_gp_valid_id) { + pr_warn("ALUA TG PT Group already has a valid ID," + " ignoring request\n"); + return -EINVAL; + } + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { + pr_err("Maximum ALUA alua_tg_pt_gps_count:" + " 0x0000ffff reached\n"); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + return -ENOSPC; + } +again: + tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : + dev->t10_alua.alua_tg_pt_gps_counter++; + + list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { + if (!tg_pt_gp_id) + goto again; + + pr_err("ALUA Target Port Group ID: %hu already" + " exists, ignoring request\n", tg_pt_gp_id); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + return -EINVAL; + } + } + + tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; + tg_pt_gp->tg_pt_gp_valid_id = 1; + list_add_tail(&tg_pt_gp->tg_pt_gp_list, + &dev->t10_alua.tg_pt_gps_list); + dev->t10_alua.alua_tg_pt_gps_count++; + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + return 0; +} + +void core_alua_free_tg_pt_gp( + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + struct se_lun *lun, *next; + + /* + * Once we have reached this point, config_item_put() has already + * been called from target_core_alua_drop_tg_pt_gp(). + * + * Here we remove *tg_pt_gp from the global list so that + * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS + * can be made while we are releasing struct t10_alua_tg_pt_gp. + */ + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + if (tg_pt_gp->tg_pt_gp_valid_id) { + list_del(&tg_pt_gp->tg_pt_gp_list); + dev->t10_alua.alua_tg_pt_gps_count--; + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + /* + * Allow a struct t10_alua_tg_pt_gp_member * referenced by + * core_alua_get_tg_pt_gp_by_name() in + * target_core_configfs.c:target_core_store_alua_tg_pt_gp() + * to be released with core_alua_put_tg_pt_gp_from_name(). + */ + while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) + cpu_relax(); + + /* + * Release reference to struct t10_alua_tg_pt_gp from all associated + * struct se_port. + */ + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_for_each_entry_safe(lun, next, + &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) { + list_del_init(&lun->lun_tg_pt_gp_link); + tg_pt_gp->tg_pt_gp_members--; + + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + /* + * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, + * assume we want to re-associate a given tg_pt_gp_mem with + * default_tg_pt_gp. 
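+ *
+ * (When default_tg_pt_gp itself is being released, the LUN's
+ * lun_tg_pt_gp pointer is cleared instead.)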
+ */ + spin_lock(&lun->lun_tg_pt_gp_lock); + if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { + __target_attach_tg_pt_gp(lun, + dev->t10_alua.default_tg_pt_gp); + } else + rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); + spin_unlock(&lun->lun_tg_pt_gp_lock); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + } + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + + synchronize_rcu(); + kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); +} + +static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( + struct se_device *dev, const char *name) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct config_item *ci; + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, + tg_pt_gp_list) { + if (!tg_pt_gp->tg_pt_gp_valid_id) + continue; + ci = &tg_pt_gp->tg_pt_gp_group.cg_item; + if (!strcmp(config_item_name(ci), name)) { + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + return tg_pt_gp; + } + } + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + + return NULL; +} + +static void core_alua_put_tg_pt_gp_from_name( + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + + spin_lock(&dev->t10_alua.tg_pt_gps_lock); + atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); +} + +static void __target_attach_tg_pt_gp(struct se_lun *lun, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + struct se_dev_entry *se_deve; + + assert_spin_locked(&lun->lun_tg_pt_gp_lock); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp); + list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); + tg_pt_gp->tg_pt_gp_members++; + spin_lock(&lun->lun_deve_lock); + list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) + core_scsi3_ua_allocate(se_deve, 0x3f, + ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED); + spin_unlock(&lun->lun_deve_lock); + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); +} + +void target_attach_tg_pt_gp(struct se_lun *lun, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + spin_lock(&lun->lun_tg_pt_gp_lock); + __target_attach_tg_pt_gp(lun, tg_pt_gp); + spin_unlock(&lun->lun_tg_pt_gp_lock); + synchronize_rcu(); +} + +static void __target_detach_tg_pt_gp(struct se_lun *lun, + struct t10_alua_tg_pt_gp *tg_pt_gp) +{ + assert_spin_locked(&lun->lun_tg_pt_gp_lock); + + spin_lock(&tg_pt_gp->tg_pt_gp_lock); + list_del_init(&lun->lun_tg_pt_gp_link); + tg_pt_gp->tg_pt_gp_members--; + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); +} + +void target_detach_tg_pt_gp(struct se_lun *lun) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + + spin_lock(&lun->lun_tg_pt_gp_lock); + tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, + lockdep_is_held(&lun->lun_tg_pt_gp_lock)); + if (tg_pt_gp) { + __target_detach_tg_pt_gp(lun, tg_pt_gp); + rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); + } + spin_unlock(&lun->lun_tg_pt_gp_lock); + synchronize_rcu(); +} + +static void target_swap_tg_pt_gp(struct se_lun *lun, + struct t10_alua_tg_pt_gp *old_tg_pt_gp, + struct t10_alua_tg_pt_gp *new_tg_pt_gp) +{ + assert_spin_locked(&lun->lun_tg_pt_gp_lock); + + if (old_tg_pt_gp) + __target_detach_tg_pt_gp(lun, old_tg_pt_gp); + __target_attach_tg_pt_gp(lun, new_tg_pt_gp); +} + +ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) +{ + struct config_item *tg_pt_ci; + struct t10_alua_tg_pt_gp *tg_pt_gp; + ssize_t len = 0; + + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); + if (tg_pt_gp) { + tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; + len += sprintf(page, "TG Port 
Alias: %s\nTG Port Group ID:" + " %hu\nTG Port Primary Access State: %s\nTG Port " + "Primary Access Status: %s\nTG Port Secondary Access" + " State: %s\nTG Port Secondary Access Status: %s\n", + config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state( + tg_pt_gp->tg_pt_gp_alua_access_state), + core_alua_dump_status( + tg_pt_gp->tg_pt_gp_alua_access_status), + atomic_read(&lun->lun_tg_pt_secondary_offline) ? + "Offline" : "None", + core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); + } + rcu_read_unlock(); + + return len; +} + +ssize_t core_alua_store_tg_pt_gp_info( + struct se_lun *lun, + const char *page, + size_t count) +{ + struct se_portal_group *tpg = lun->lun_tpg; + /* + * rcu_dereference_raw protected by se_lun->lun_group symlink + * reference to se_device->dev_group. + */ + struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; + unsigned char buf[TG_PT_GROUP_NAME_BUF]; + int move = 0; + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || + (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) + return -ENODEV; + + if (count > TG_PT_GROUP_NAME_BUF) { + pr_err("ALUA Target Port Group alias too large!\n"); + return -EINVAL; + } + memset(buf, 0, TG_PT_GROUP_NAME_BUF); + memcpy(buf, page, count); + /* + * Any ALUA target port group alias besides "NULL" means we will be + * making a new group association. + */ + if (strcmp(strstrip(buf), "NULL")) { + /* + * core_alua_get_tg_pt_gp_by_name() will increment reference to + * struct t10_alua_tg_pt_gp. This reference is released with + * core_alua_put_tg_pt_gp_from_name() below. + */ + tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, + strstrip(buf)); + if (!tg_pt_gp_new) + return -ENODEV; + } + + spin_lock(&lun->lun_tg_pt_gp_lock); + tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, + lockdep_is_held(&lun->lun_tg_pt_gp_lock)); + if (tg_pt_gp) { + /* + * Clearing an existing tg_pt_gp association, and replacing + * with the default_tg_pt_gp. + */ + if (!tg_pt_gp_new) { + pr_debug("Target_Core_ConfigFS: Moving" + " %s/tpgt_%hu/%s from ALUA Target Port Group:" + " alua/%s, ID: %hu back to" + " default_tg_pt_gp\n", + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), + config_item_name(&lun->lun_group.cg_item), + config_item_name( + &tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id); + + target_swap_tg_pt_gp(lun, tg_pt_gp, + dev->t10_alua.default_tg_pt_gp); + spin_unlock(&lun->lun_tg_pt_gp_lock); + + goto sync_rcu; + } + move = 1; + } + + target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new); + spin_unlock(&lun->lun_tg_pt_gp_lock); + pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" + " Target Port Group: alua/%s, ID: %hu\n", (move) ? 
+ "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), + config_item_name(&lun->lun_group.cg_item), + config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), + tg_pt_gp_new->tg_pt_gp_id); + + core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); +sync_rcu: + synchronize_rcu(); + return count; +} + +ssize_t core_alua_show_access_type( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && + (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) + return sprintf(page, "Implicit and Explicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) + return sprintf(page, "Implicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) + return sprintf(page, "Explicit\n"); + else + return sprintf(page, "None\n"); +} + +ssize_t core_alua_store_access_type( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract alua_access_type\n"); + return ret; + } + if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { + pr_err("Illegal value for alua_access_type:" + " %lu\n", tmp); + return -EINVAL; + } + if (tmp == 3) + tg_pt_gp->tg_pt_gp_alua_access_type = + TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA; + else if (tmp == 2) + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA; + else if (tmp == 1) + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA; + else + tg_pt_gp->tg_pt_gp_alua_access_type = 0; + + return count; +} + +ssize_t core_alua_show_nonop_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); +} + +ssize_t core_alua_store_nonop_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract nonop_delay_msecs\n"); + return ret; + } + if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { + pr_err("Passed nonop_delay_msecs: %lu, exceeds" + " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, + ALUA_MAX_NONOP_DELAY_MSECS); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; + + return count; +} + +ssize_t core_alua_show_trans_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); +} + +ssize_t core_alua_store_trans_delay_msecs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract trans_delay_msecs\n"); + return ret; + } + if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { + pr_err("Passed trans_delay_msecs: %lu, exceeds" + " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, + ALUA_MAX_TRANS_DELAY_MSECS); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; + + return count; +} + +ssize_t core_alua_show_implicit_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); +} + +ssize_t core_alua_store_implicit_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract implicit_trans_secs\n"); + return ret; + } + if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) { + pr_err("Passed 
implicit_trans_secs: %lu, exceeds" + " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, + ALUA_MAX_IMPLICIT_TRANS_SECS); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp; + + return count; +} + +ssize_t core_alua_show_preferred_bit( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); +} + +ssize_t core_alua_store_preferred_bit( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract preferred ALUA value\n"); + return ret; + } + if ((tmp != 0) && (tmp != 1)) { + pr_err("Illegal value for preferred ALUA: %lu\n", tmp); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_pref = (int)tmp; + + return count; +} + +ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) +{ + return sprintf(page, "%d\n", + atomic_read(&lun->lun_tg_pt_secondary_offline)); +} + +ssize_t core_alua_store_offline_bit( + struct se_lun *lun, + const char *page, + size_t count) +{ + /* + * rcu_dereference_raw protected by se_lun->lun_group symlink + * reference to se_device->dev_group. + */ + struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + unsigned long tmp; + int ret; + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || + (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) + return -ENODEV; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract alua_tg_pt_offline value\n"); + return ret; + } + if ((tmp != 0) && (tmp != 1)) { + pr_err("Illegal value for alua_tg_pt_offline: %lu\n", + tmp); + return -EINVAL; + } + + ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp); + if (ret < 0) + return -EINVAL; + + return count; +} + +ssize_t core_alua_show_secondary_status( + struct se_lun *lun, + char *page) +{ + return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat); +} + +ssize_t core_alua_store_secondary_status( + struct se_lun *lun, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract alua_tg_pt_status\n"); + return ret; + } + if ((tmp != ALUA_STATUS_NONE) && + (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { + pr_err("Illegal value for alua_tg_pt_status: %lu\n", + tmp); + return -EINVAL; + } + lun->lun_tg_pt_secondary_stat = (int)tmp; + + return count; +} + +ssize_t core_alua_show_secondary_write_metadata( + struct se_lun *lun, + char *page) +{ + return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md); +} + +ssize_t core_alua_store_secondary_write_metadata( + struct se_lun *lun, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract alua_tg_pt_write_md\n"); + return ret; + } + if ((tmp != 0) && (tmp != 1)) { + pr_err("Illegal value for alua_tg_pt_write_md:" + " %lu\n", tmp); + return -EINVAL; + } + lun->lun_tg_pt_secondary_write_md = (int)tmp; + + return count; +} + +int core_setup_alua(struct se_device *dev) +{ + if (!(dev->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_ALUA) && + !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { + struct t10_alua_lu_gp_member *lu_gp_mem; + + /* + * Associate this struct se_device with the default ALUA + * LUN Group. 
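+	 * The default group is exposed in configfs as
+	 * core/alua/lu_gps/default_lu_gp under the usual
+	 * /sys/kernel/config/target mount point.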
+ */ + lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); + if (IS_ERR(lu_gp_mem)) + return PTR_ERR(lu_gp_mem); + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + __core_alua_attach_lu_gp_mem(lu_gp_mem, + default_lu_gp); + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + pr_debug("%s: Adding to default ALUA LU Group:" + " core/alua/lu_gps/default_lu_gp\n", + dev->transport->name); + } + + return 0; +} diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h new file mode 100644 index 0000000000..fc9637cce8 --- /dev/null +++ b/drivers/target/target_core_alua.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_ALUA_H +#define TARGET_CORE_ALUA_H + +#include <target/target_core_base.h> + +/* + * INQUIRY response data, TPGS Field + * + * from spc4r17 section 6.4.2 Table 135 + */ +#define TPGS_NO_ALUA 0x00 +#define TPGS_IMPLICIT_ALUA 0x10 +#define TPGS_EXPLICIT_ALUA 0x20 + +/* + * ASYMMETRIC ACCESS STATE field + * + * from spc4r36j section 6.37 Table 307 + */ +#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 +#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 +#define ALUA_ACCESS_STATE_STANDBY 0x2 +#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 +#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4 +#define ALUA_ACCESS_STATE_OFFLINE 0xe +#define ALUA_ACCESS_STATE_TRANSITION 0xf + +/* + * from spc4r36j section 6.37 Table 306 + */ +#define ALUA_T_SUP 0x80 +#define ALUA_O_SUP 0x40 +#define ALUA_LBD_SUP 0x10 +#define ALUA_U_SUP 0x08 +#define ALUA_S_SUP 0x04 +#define ALUA_AN_SUP 0x02 +#define ALUA_AO_SUP 0x01 + +/* + * REPORT_TARGET_PORT_GROUP STATUS CODE + * + * from spc4r17 section 6.27 Table 246 + */ +#define ALUA_STATUS_NONE 0x00 +#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01 +#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02 + +/* + * From spc4r17, Table D.1: ASC and ASCQ Assignment + */ +#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a +#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b +#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c +#define ASCQ_04H_ALUA_OFFLINE 0x12 + +/* + * Used as the default for the Active/NonOptimized delay (in milliseconds). + * This can also be changed via configfs on a per target port group basis. + */ +#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 +#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ +/* + * Used for the implicit and explicit ALUA transition delay, which is disabled + * by default and is intended to be used for debugging client side ALUA code. + */ +#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 +#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ +/* + * Used for the recommended application client implicit transition timeout + * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
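+ * It is exposed per target port group as the implicit_trans_secs
+ * configfs attribute; core_alua_store_implicit_trans_secs() rejects
+ * values above ALUA_MAX_IMPLICIT_TRANS_SECS.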
+ */ +#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0 +#define ALUA_MAX_IMPLICIT_TRANS_SECS 255 + +/* Used by core_alua_update_tpg_(primary,secondary)_metadata */ +#define ALUA_MD_BUF_LEN 1024 + +extern struct kmem_cache *t10_alua_lu_gp_cache; +extern struct kmem_cache *t10_alua_lu_gp_mem_cache; +extern struct kmem_cache *t10_alua_tg_pt_gp_cache; +extern struct kmem_cache *t10_alua_lba_map_cache; +extern struct kmem_cache *t10_alua_lba_map_mem_cache; + +extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *); +extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *); +extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); +extern int core_alua_check_nonop_delay(struct se_cmd *); +extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, + struct se_device *, struct se_lun *, + struct se_node_acl *, int, int); +extern char *core_alua_dump_status(int); +extern struct t10_alua_lba_map *core_alua_allocate_lba_map( + struct list_head *, u64, u64); +extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int); +extern void core_alua_free_lba_map(struct list_head *); +extern void core_alua_set_lba_map(struct se_device *, struct list_head *, + int, int); +extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); +extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); +extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); +extern void core_alua_free_lu_gp_mem(struct se_device *); +extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *); +extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *); +extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *, + struct t10_alua_lu_gp *); +extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *, + struct t10_alua_lu_gp *); +extern void core_alua_drop_lu_gp_dev(struct se_device *); +extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( + struct se_device *, const char *, int); +extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); +extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *); +extern void target_detach_tg_pt_gp(struct se_lun *); +extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *); +extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *); +extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *, + size_t); +extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *); +extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *, + const char *, size_t); +extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *); +extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *, + size_t); +extern 
ssize_t core_alua_show_secondary_status(struct se_lun *, char *); +extern ssize_t core_alua_store_secondary_status(struct se_lun *, + const char *, size_t); +extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *, + char *); +extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *, + const char *, size_t); +extern int core_setup_alua(struct se_device *); +extern sense_reason_t target_alua_state_check(struct se_cmd *cmd); + +#endif /* TARGET_CORE_ALUA_H */ diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c new file mode 100644 index 0000000000..d5860c1c1f --- /dev/null +++ b/drivers/target/target_core_configfs.c @@ -0,0 +1,3759 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_configfs.c + * + * This file contains ConfigFS logic for the Generic Target Engine project. + * + * (c) Copyright 2008-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + * + * based on configfs Copyright (C) 2005 Oracle. All rights reserved. + * + ****************************************************************************/ + +#include <linux/kstrtox.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <generated/utsrelease.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/unistd.h> +#include <linux/string.h> +#include <linux/parser.h> +#include <linux/syscalls.h> +#include <linux/configfs.h> +#include <linux/spinlock.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" +#include "target_core_rd.h" +#include "target_core_xcopy.h" + +#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ +static void target_core_setup_##_name##_cit(struct target_backend *tb) \ +{ \ + struct config_item_type *cit = &tb->tb_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = _attrs; \ + cit->ct_owner = tb->ops->owner; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} + +#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ +static void target_core_setup_##_name##_cit(struct target_backend *tb) \ +{ \ + struct config_item_type *cit = &tb->tb_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = tb->ops->tb_##_name##_attrs; \ + cit->ct_owner = tb->ops->owner; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} + +extern struct t10_alua_lu_gp *default_lu_gp; + +static LIST_HEAD(g_tf_list); +static DEFINE_MUTEX(g_tf_lock); + +static struct config_group target_core_hbagroup; +static struct config_group alua_group; +static struct config_group alua_lu_gps_group; + +static unsigned int target_devices; +static DEFINE_MUTEX(target_devices_lock); + +static inline struct se_hba * +item_to_hba(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_hba, hba_group); +} + +/* + * Attributes for /sys/kernel/config/target/ + */ +static ssize_t target_core_item_version_show(struct config_item *item, + char *page) +{ + return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" + " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, + utsname()->sysname, 
utsname()->machine); +} + +CONFIGFS_ATTR_RO(target_core_item_, version); + +char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT; +static char db_root_stage[DB_ROOT_LEN]; + +static ssize_t target_core_item_dbroot_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%s\n", db_root); +} + +static ssize_t target_core_item_dbroot_store(struct config_item *item, + const char *page, size_t count) +{ + ssize_t read_bytes; + struct file *fp; + ssize_t r = -EINVAL; + + mutex_lock(&target_devices_lock); + if (target_devices) { + pr_err("db_root: cannot be changed because it's in use\n"); + goto unlock; + } + + if (count > (DB_ROOT_LEN - 1)) { + pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n", + (int)count, DB_ROOT_LEN - 1); + goto unlock; + } + + read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page); + if (!read_bytes) + goto unlock; + + if (db_root_stage[read_bytes - 1] == '\n') + db_root_stage[read_bytes - 1] = '\0'; + + /* validate new db root before accepting it */ + fp = filp_open(db_root_stage, O_RDONLY, 0); + if (IS_ERR(fp)) { + pr_err("db_root: cannot open: %s\n", db_root_stage); + goto unlock; + } + if (!S_ISDIR(file_inode(fp)->i_mode)) { + filp_close(fp, NULL); + pr_err("db_root: not a directory: %s\n", db_root_stage); + goto unlock; + } + filp_close(fp, NULL); + + strncpy(db_root, db_root_stage, read_bytes); + pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root); + + r = read_bytes; + +unlock: + mutex_unlock(&target_devices_lock); + return r; +} + +CONFIGFS_ATTR(target_core_item_, dbroot); + +static struct target_fabric_configfs *target_core_get_fabric( + const char *name) +{ + struct target_fabric_configfs *tf; + + if (!name) + return NULL; + + mutex_lock(&g_tf_lock); + list_for_each_entry(tf, &g_tf_list, tf_list) { + const char *cmp_name = tf->tf_ops->fabric_alias; + if (!cmp_name) + cmp_name = tf->tf_ops->fabric_name; + if (!strcmp(cmp_name, name)) { + atomic_inc(&tf->tf_access_cnt); + mutex_unlock(&g_tf_lock); + return tf; + } + } + mutex_unlock(&g_tf_lock); + + return NULL; +} + +/* + * Called from struct target_core_group_ops->make_group() + */ +static struct config_group *target_core_register_fabric( + struct config_group *group, + const char *name) +{ + struct target_fabric_configfs *tf; + int ret; + + pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" + " %s\n", group, name); + + tf = target_core_get_fabric(name); + if (!tf) { + pr_debug("target_core_register_fabric() trying autoload for %s\n", + name); + + /* + * Below are some hardcoded request_module() calls to automatically + * load fabric modules when the following is called: + * + * mkdir -p /sys/kernel/config/target/$MODULE_NAME + * + * Note that this does not limit which TCM fabric module can be + * registered, but simply provides auto loading logic for modules with + * mkdir(2) system calls with known TCM fabric modules.
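+	 *
+	 * For example, a hypothetical shell session:
+	 *
+	 *	mkdir -p /sys/kernel/config/target/iscsi
+	 *
+	 * ends up triggering the request_module("iscsi_target_mod")
+	 * call below.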
+ */ + + if (!strncmp(name, "iscsi", 5)) { + /* + * Automatically load the LIO Target fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/iscsi + */ + ret = request_module("iscsi_target_mod"); + if (ret < 0) { + pr_debug("request_module() failed for" + " iscsi_target_mod.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } + } else if (!strncmp(name, "loopback", 8)) { + /* + * Automatically load the tcm_loop fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/loopback + */ + ret = request_module("tcm_loop"); + if (ret < 0) { + pr_debug("request_module() failed for" + " tcm_loop.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } + } + + tf = target_core_get_fabric(name); + } + + if (!tf) { + pr_debug("target_core_get_fabric() failed for %s\n", + name); + return ERR_PTR(-EINVAL); + } + pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" + " %s\n", tf->tf_ops->fabric_name); + /* + * On a successful target_core_get_fabric() lookup, the returned + * struct target_fabric_configfs *tf will contain a usage reference. + */ + pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", + &tf->tf_wwn_cit); + + config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit); + + config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", + &tf->tf_discovery_cit); + configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group); + + pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n", + config_item_name(&tf->tf_group.cg_item)); + return &tf->tf_group; +} + +/* + * Called from struct target_core_group_ops->drop_item() + */ +static void target_core_deregister_fabric( + struct config_group *group, + struct config_item *item) +{ + struct target_fabric_configfs *tf = container_of( + to_config_group(item), struct target_fabric_configfs, tf_group); + + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" + " tf list\n", config_item_name(item)); + + pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" + " %s\n", tf->tf_ops->fabric_name); + atomic_dec(&tf->tf_access_cnt); + + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" + " %s\n", config_item_name(item)); + + configfs_remove_default_groups(&tf->tf_group); + config_item_put(item); +} + +static struct configfs_group_operations target_core_fabric_group_ops = { + .make_group = &target_core_register_fabric, + .drop_item = &target_core_deregister_fabric, +}; + +/* + * All item attributes appearing in /sys/kernel/config/target/ appear here.
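+ * At present these are the read-only "version" attribute and the
+ * read-write "dbroot" attribute defined above.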
+ */ +static struct configfs_attribute *target_core_fabric_item_attrs[] = { + &target_core_item_attr_version, + &target_core_item_attr_dbroot, + NULL, +}; + +/* + * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/ + */ +static const struct config_item_type target_core_fabrics_item = { + .ct_group_ops = &target_core_fabric_group_ops, + .ct_attrs = target_core_fabric_item_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem target_core_fabrics = { + .su_group = { + .cg_item = { + .ci_namebuf = "target", + .ci_type = &target_core_fabrics_item, + }, + }, +}; + +int target_depend_item(struct config_item *item) +{ + return configfs_depend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_depend_item); + +void target_undepend_item(struct config_item *item) +{ + return configfs_undepend_item(item); +} +EXPORT_SYMBOL(target_undepend_item); + +/*############################################################################## +// Start functions called by external Target Fabrics Modules +//############################################################################*/ +static int target_disable_feature(struct se_portal_group *se_tpg) +{ + return 0; +} + +static u32 target_default_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 target_default_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static void target_set_default_node_attributes(struct se_node_acl *se_acl) +{ +} + +static int target_default_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) +{ + if (tfo->fabric_alias) { + if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) { + pr_err("Passed alias: %s exceeds " + "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias); + return -EINVAL; + } + } + if (!tfo->fabric_name) { + pr_err("Missing tfo->fabric_name\n"); + return -EINVAL; + } + if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) { + pr_err("Passed name: %s exceeds " + "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name); + return -EINVAL; + } + if (!tfo->tpg_get_wwn) { + pr_err("Missing tfo->tpg_get_wwn()\n"); + return -EINVAL; + } + if (!tfo->tpg_get_tag) { + pr_err("Missing tfo->tpg_get_tag()\n"); + return -EINVAL; + } + if (!tfo->release_cmd) { + pr_err("Missing tfo->release_cmd()\n"); + return -EINVAL; + } + if (!tfo->write_pending) { + pr_err("Missing tfo->write_pending()\n"); + return -EINVAL; + } + if (!tfo->queue_data_in) { + pr_err("Missing tfo->queue_data_in()\n"); + return -EINVAL; + } + if (!tfo->queue_status) { + pr_err("Missing tfo->queue_status()\n"); + return -EINVAL; + } + if (!tfo->queue_tm_rsp) { + pr_err("Missing tfo->queue_tm_rsp()\n"); + return -EINVAL; + } + if (!tfo->aborted_task) { + pr_err("Missing tfo->aborted_task()\n"); + return -EINVAL; + } + if (!tfo->check_stop_free) { + pr_err("Missing tfo->check_stop_free()\n"); + return -EINVAL; + } + /* + * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() + * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in + * target_core_fabric_configfs.c WWN+TPG group context code. 
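+ *
+ * A minimal registration therefore provides at least the following
+ * callbacks (a sketch with hypothetical handler names, not a complete
+ * driver; see the checks below for the authoritative list):
+ *
+ *	static const struct target_core_fabric_ops my_fabric_ops = {
+ *		.fabric_name		= "my_fabric",
+ *		.tpg_get_wwn		= my_tpg_get_wwn,
+ *		.tpg_get_tag		= my_tpg_get_tag,
+ *		.release_cmd		= my_release_cmd,
+ *		.write_pending		= my_write_pending,
+ *		.queue_data_in		= my_queue_data_in,
+ *		.queue_status		= my_queue_status,
+ *		.queue_tm_rsp		= my_queue_tm_rsp,
+ *		.aborted_task		= my_aborted_task,
+ *		.check_stop_free	= my_check_stop_free,
+ *		.fabric_make_wwn	= my_make_wwn,
+ *		.fabric_drop_wwn	= my_drop_wwn,
+ *		.fabric_make_tpg	= my_make_tpg,
+ *		.fabric_drop_tpg	= my_drop_tpg,
+ *	};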
+ */ + if (!tfo->fabric_make_wwn) { + pr_err("Missing tfo->fabric_make_wwn()\n"); + return -EINVAL; + } + if (!tfo->fabric_drop_wwn) { + pr_err("Missing tfo->fabric_drop_wwn()\n"); + return -EINVAL; + } + if (!tfo->fabric_make_tpg) { + pr_err("Missing tfo->fabric_make_tpg()\n"); + return -EINVAL; + } + if (!tfo->fabric_drop_tpg) { + pr_err("Missing tfo->fabric_drop_tpg()\n"); + return -EINVAL; + } + + return 0; +} + +static void target_set_default_ops(struct target_core_fabric_ops *tfo) +{ + if (!tfo->tpg_check_demo_mode) + tfo->tpg_check_demo_mode = target_disable_feature; + + if (!tfo->tpg_check_demo_mode_cache) + tfo->tpg_check_demo_mode_cache = target_disable_feature; + + if (!tfo->tpg_check_demo_mode_write_protect) + tfo->tpg_check_demo_mode_write_protect = target_disable_feature; + + if (!tfo->tpg_check_prod_mode_write_protect) + tfo->tpg_check_prod_mode_write_protect = target_disable_feature; + + if (!tfo->tpg_get_inst_index) + tfo->tpg_get_inst_index = target_default_get_inst_index; + + if (!tfo->sess_get_index) + tfo->sess_get_index = target_default_sess_get_index; + + if (!tfo->set_default_node_attributes) + tfo->set_default_node_attributes = target_set_default_node_attributes; + + if (!tfo->get_cmd_state) + tfo->get_cmd_state = target_default_get_cmd_state; +} + +int target_register_template(const struct target_core_fabric_ops *fo) +{ + struct target_core_fabric_ops *tfo; + struct target_fabric_configfs *tf; + int ret; + + ret = target_fabric_tf_ops_check(fo); + if (ret) + return ret; + + tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); + if (!tf) { + pr_err("%s: could not allocate memory!\n", __func__); + return -ENOMEM; + } + tfo = kzalloc(sizeof(struct target_core_fabric_ops), GFP_KERNEL); + if (!tfo) { + kfree(tf); + pr_err("%s: could not allocate memory!\n", __func__); + return -ENOMEM; + } + memcpy(tfo, fo, sizeof(*tfo)); + target_set_default_ops(tfo); + + INIT_LIST_HEAD(&tf->tf_list); + atomic_set(&tf->tf_access_cnt, 0); + tf->tf_ops = tfo; + target_fabric_setup_cits(tf); + + mutex_lock(&g_tf_lock); + list_add_tail(&tf->tf_list, &g_tf_list); + mutex_unlock(&g_tf_lock); + + return 0; +} +EXPORT_SYMBOL(target_register_template); + +void target_unregister_template(const struct target_core_fabric_ops *fo) +{ + struct target_fabric_configfs *t; + + mutex_lock(&g_tf_lock); + list_for_each_entry(t, &g_tf_list, tf_list) { + if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) { + BUG_ON(atomic_read(&t->tf_access_cnt)); + list_del(&t->tf_list); + mutex_unlock(&g_tf_lock); + /* + * Wait for any outstanding fabric se_deve_entry->rcu_head + * callbacks to complete post kfree_rcu(), before allowing + * fabric driver unload of TFO->module to proceed. 
+ */ + rcu_barrier(); + kfree(t->tf_tpg_base_cit.ct_attrs); + kfree(t->tf_ops); + kfree(t); + return; + } + } + mutex_unlock(&g_tf_lock); +} +EXPORT_SYMBOL(target_unregister_template); + +/*############################################################################## +// Stop functions called by external Target Fabrics Modules +//############################################################################*/ + +static inline struct se_dev_attrib *to_attrib(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_dev_attrib, + da_group); +} + +/* Start functions for struct config_item_type tb_dev_attrib_cit */ +#define DEF_CONFIGFS_ATTRIB_SHOW(_name) \ +static ssize_t _name##_show(struct config_item *item, char *page) \ +{ \ + return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \ +} + +DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr); +DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type); +DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type); +DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify); +DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids); +DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord); +DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl); +DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size); +DEF_CONFIGFS_ATTRIB_SHOW(block_size); +DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors); +DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors); +DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth); +DEF_CONFIGFS_ATTRIB_SHOW(queue_depth); +DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count); +DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count); +DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity); +DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment); +DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data); +DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); +DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); + +#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ +static ssize_t _name##_store(struct config_item *item, const char *page,\ + size_t count) \ +{ \ + struct se_dev_attrib *da = to_attrib(item); \ + u32 val; \ + int ret; \ + \ + ret = kstrtou32(page, 0, &val); \ + if (ret < 0) \ + return ret; \ + da->_name = val; \ + return count; \ +} + +DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count); +DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count); +DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity); +DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment); +DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len); + +#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \ +static ssize_t _name##_store(struct config_item *item, const char *page, \ + size_t count) \ +{ \ + struct se_dev_attrib *da = to_attrib(item); \ + bool flag; \ + int ret; \ + \ + ret = kstrtobool(page, &flag); \ + if (ret < 0) \ + return ret; \ + da->_name = flag; \ + return count; \ +} + +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids); +DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot); + +#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \ +static ssize_t 
_name##_store(struct config_item *item, const char *page,\ + size_t count) \ +{ \ + printk_once(KERN_WARNING \ + "ignoring deprecated %s attribute\n", \ + __stringify(_name)); \ + return count; \ +} + +DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo); +DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read); + +static void dev_set_t10_wwn_model_alias(struct se_device *dev) +{ + const char *configname; + + configname = config_item_name(&dev->dev_group.cg_item); + if (strlen(configname) >= INQUIRY_MODEL_LEN) { + pr_warn("dev[%p]: Backstore name '%s' is too long for " + "INQUIRY_MODEL, truncating to 15 characters\n", dev, + configname); + } + /* + * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1) + * here without potentially breaking existing setups, so continue to + * truncate one byte shorter than what can be carried in INQUIRY. + */ + strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN); +} + +static ssize_t emulate_model_alias_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag; + int ret; + + if (dev->export_count) { + pr_err("dev[%p]: Unable to change model alias" + " while export_count is %d\n", + dev, dev->export_count); + return -EINVAL; + } + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); + if (flag) { + dev_set_t10_wwn_model_alias(dev); + } else { + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, + sizeof(dev->t10_wwn.model)); + } + da->emulate_model_alias = flag; + return count; +} + +static ssize_t emulate_write_cache_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + if (flag && da->da_dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; + } + + da->emulate_write_cache = flag; + pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", + da->da_dev, flag); + return count; +} + +static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (val != TARGET_UA_INTLCK_CTRL_CLEAR + && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR + && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { + pr_err("Illegal value %d\n", val); + return -EINVAL; + } + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device" + " UA_INTRLCK_CTRL while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + da->emulate_ua_intlck_ctrl = val; + pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", + da->da_dev, val); + return count; +} + +static ssize_t emulate_tas_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device TAS while" + " export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + da->emulate_tas = flag; + pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", + da->da_dev, flag ? 
"Enabled" : "Disabled"); + + return count; +} + +static ssize_t emulate_tpu_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + /* + * We expect this value to be non-zero when generic Block Layer + * Discard supported is detected iblock_create_virtdevice(). + */ + if (flag && !da->max_unmap_block_desc_count) { + if (!dev->transport->configure_unmap || + !dev->transport->configure_unmap(dev)) { + pr_err("Generic Block Discard not supported\n"); + return -ENOSYS; + } + } + + da->emulate_tpu = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", + da->da_dev, flag); + return count; +} + +static ssize_t emulate_tpws_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + /* + * We expect this value to be non-zero when generic Block Layer + * Discard supported is detected iblock_create_virtdevice(). + */ + if (flag && !da->max_unmap_block_desc_count) { + if (!dev->transport->configure_unmap || + !dev->transport->configure_unmap(dev)) { + pr_err("Generic Block Discard not supported\n"); + return -ENOSYS; + } + } + + da->emulate_tpws = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", + da->da_dev, flag); + return count; +} + +static ssize_t pi_prot_type_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + int old_prot = da->pi_prot_type, ret; + struct se_device *dev = da->da_dev; + u32 flag; + + ret = kstrtou32(page, 0, &flag); + if (ret < 0) + return ret; + + if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { + pr_err("Illegal value %d for pi_prot_type\n", flag); + return -EINVAL; + } + if (flag == 2) { + pr_err("DIF TYPE2 protection currently not supported\n"); + return -ENOSYS; + } + if (da->hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return count; + } + if (!dev->transport->init_prot || !dev->transport->free_prot) { + /* 0 is only allowed value for non-supporting backends */ + if (flag == 0) + return count; + + pr_err("DIF protection not supported by backend: %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!target_dev_configured(dev)) { + pr_err("DIF protection requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + da->pi_prot_type = flag; + + if (flag && !old_prot) { + ret = dev->transport->init_prot(dev); + if (ret) { + da->pi_prot_type = old_prot; + da->pi_prot_verify = (bool) da->pi_prot_type; + return ret; + } + + } else if (!flag && old_prot) { + dev->transport->free_prot(dev); + } + + da->pi_prot_verify = (bool) da->pi_prot_type; + pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); + return count; +} + +/* always zero, but attr needs to remain RW to avoid userspace breakage */ +static ssize_t pi_prot_format_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "0\n"); +} + +static ssize_t pi_prot_format_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct 
se_device *dev = da->da_dev; + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + if (!flag) + return count; + + if (!dev->transport->format_prot) { + pr_err("DIF protection format not supported by backend %s\n", + dev->transport->name); + return -ENOSYS; + } + if (!target_dev_configured(dev)) { + pr_err("DIF protection format requires device to be configured\n"); + return -ENODEV; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to format SE Device PROT type while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + ret = dev->transport->format_prot(dev); + if (ret) + return ret; + + pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); + return count; +} + +static ssize_t pi_prot_verify_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + if (!flag) { + da->pi_prot_verify = flag; + return count; + } + if (da->hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return count; + } + if (!da->pi_prot_type) { + pr_warn("DIF protection not supported by backend, ignoring\n"); + return count; + } + da->pi_prot_verify = flag; + + return count; +} + +static ssize_t force_pr_aptpl_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to set force_pr_aptpl while" + " export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + + da->force_pr_aptpl = flag; + pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag); + return count; +} + +static ssize_t emulate_rest_reord_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + if (flag != 0) { + printk(KERN_ERR "dev[%p]: SE Device emulation of restricted" + " reordering not implemented\n", da->da_dev); + return -ENOSYS; + } + da->emulate_rest_reord = flag; + pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", + da->da_dev, flag); + return count; +} + +static ssize_t unmap_zeroes_data_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device" + " unmap_zeroes_data while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + /* + * We expect this value to be non-zero when generic Block Layer + * Discard supported is detected iblock_configure_device(). + */ + if (flag && !da->max_unmap_block_desc_count) { + if (!dev->transport->configure_unmap || + !dev->transport->configure_unmap(dev)) { + pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n", + da->da_dev); + return -ENOSYS; + } + } + da->unmap_zeroes_data = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", + da->da_dev, flag); + return count; +} + +/* + * Note, this can only be called on unexported SE Device Object. 
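+ * Illustrative usage from userspace, following the
+ * $TARGET/$HBA/$STORAGE_OBJECT convention used elsewhere in this file
+ * (a hypothetical backstore with no active exports):
+ *
+ *	echo 64 > $TARGET/$HBA/$STORAGE_OBJECT/attrib/queue_depth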
+ */ +static ssize_t queue_depth_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device TCQ while" + " export_count is %d\n", + dev, dev->export_count); + return -EINVAL; + } + if (!val) { + pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev); + return -EINVAL; + } + + if (val > dev->dev_attrib.queue_depth) { + if (val > dev->dev_attrib.hw_queue_depth) { + pr_err("dev[%p]: Passed queue_depth:" + " %u exceeds TCM/SE_Device MAX" + " TCQ: %u\n", dev, val, + dev->dev_attrib.hw_queue_depth); + return -EINVAL; + } + } + da->queue_depth = dev->queue_depth = val; + pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val); + return count; +} + +static ssize_t optimal_sectors_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device" + " optimal_sectors while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + if (val > da->hw_max_sectors) { + pr_err("dev[%p]: Passed optimal_sectors %u cannot be" + " greater than hw_max_sectors: %u\n", + da->da_dev, val, da->hw_max_sectors); + return -EINVAL; + } + + da->optimal_sectors = val; + pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", + da->da_dev, val); + return count; +} + +static ssize_t block_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (da->da_dev->export_count) { + pr_err("dev[%p]: Unable to change SE Device block_size" + " while export_count is %d\n", + da->da_dev, da->da_dev->export_count); + return -EINVAL; + } + + if (val != 512 && val != 1024 && val != 2048 && val != 4096) { + pr_err("dev[%p]: Illegal value for block_device: %u" + " for SE device, must be 512, 1024, 2048 or 4096\n", + da->da_dev, val); + return -EINVAL; + } + + da->block_size = val; + + pr_debug("dev[%p]: SE Device block_size changed to %u\n", + da->da_dev, val); + return count; +} + +static ssize_t alua_support_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = to_attrib(item); + u8 flags = da->da_dev->transport_flags; + + return snprintf(page, PAGE_SIZE, "%d\n", + flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 
0 : 1); +} + +static ssize_t alua_support_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag, oldflag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA); + if (flag == oldflag) + return count; + + if (!(dev->transport->transport_flags_changeable & + TRANSPORT_FLAG_PASSTHROUGH_ALUA)) { + pr_err("dev[%p]: Unable to change SE Device alua_support:" + " alua_support has fixed value\n", dev); + return -ENOSYS; + } + + if (flag) + dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA; + else + dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA; + return count; +} + +static ssize_t pgr_support_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = to_attrib(item); + u8 flags = da->da_dev->transport_flags; + + return snprintf(page, PAGE_SIZE, "%d\n", + flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1); +} + +static ssize_t pgr_support_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + struct se_device *dev = da->da_dev; + bool flag, oldflag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR); + if (flag == oldflag) + return count; + + if (!(dev->transport->transport_flags_changeable & + TRANSPORT_FLAG_PASSTHROUGH_PGR)) { + pr_err("dev[%p]: Unable to change SE Device pgr_support:" + " pgr_support has fixed value\n", dev); + return -ENOSYS; + } + + if (flag) + dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR; + else + dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR; + return count; +} + +static ssize_t emulate_rsoc_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = kstrtobool(page, &flag); + if (ret < 0) + return ret; + + da->emulate_rsoc = flag; + pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n", + da->da_dev, flag); + return count; +} + +CONFIGFS_ATTR(, emulate_model_alias); +CONFIGFS_ATTR(, emulate_dpo); +CONFIGFS_ATTR(, emulate_fua_write); +CONFIGFS_ATTR(, emulate_fua_read); +CONFIGFS_ATTR(, emulate_write_cache); +CONFIGFS_ATTR(, emulate_ua_intlck_ctrl); +CONFIGFS_ATTR(, emulate_tas); +CONFIGFS_ATTR(, emulate_tpu); +CONFIGFS_ATTR(, emulate_tpws); +CONFIGFS_ATTR(, emulate_caw); +CONFIGFS_ATTR(, emulate_3pc); +CONFIGFS_ATTR(, emulate_pr); +CONFIGFS_ATTR(, emulate_rsoc); +CONFIGFS_ATTR(, pi_prot_type); +CONFIGFS_ATTR_RO(, hw_pi_prot_type); +CONFIGFS_ATTR(, pi_prot_format); +CONFIGFS_ATTR(, pi_prot_verify); +CONFIGFS_ATTR(, enforce_pr_isids); +CONFIGFS_ATTR(, is_nonrot); +CONFIGFS_ATTR(, emulate_rest_reord); +CONFIGFS_ATTR(, force_pr_aptpl); +CONFIGFS_ATTR_RO(, hw_block_size); +CONFIGFS_ATTR(, block_size); +CONFIGFS_ATTR_RO(, hw_max_sectors); +CONFIGFS_ATTR(, optimal_sectors); +CONFIGFS_ATTR_RO(, hw_queue_depth); +CONFIGFS_ATTR(, queue_depth); +CONFIGFS_ATTR(, max_unmap_lba_count); +CONFIGFS_ATTR(, max_unmap_block_desc_count); +CONFIGFS_ATTR(, unmap_granularity); +CONFIGFS_ATTR(, unmap_granularity_alignment); +CONFIGFS_ATTR(, unmap_zeroes_data); +CONFIGFS_ATTR(, max_write_same_len); +CONFIGFS_ATTR(, alua_support); +CONFIGFS_ATTR(, pgr_support); + +/* + * dev_attrib attributes for devices using the target core SBC/SPC + * interpreter. 
Any backend using spc_parse_cdb should be using + * these. + */ +struct configfs_attribute *sbc_attrib_attrs[] = { + &attr_emulate_model_alias, + &attr_emulate_dpo, + &attr_emulate_fua_write, + &attr_emulate_fua_read, + &attr_emulate_write_cache, + &attr_emulate_ua_intlck_ctrl, + &attr_emulate_tas, + &attr_emulate_tpu, + &attr_emulate_tpws, + &attr_emulate_caw, + &attr_emulate_3pc, + &attr_emulate_pr, + &attr_pi_prot_type, + &attr_hw_pi_prot_type, + &attr_pi_prot_format, + &attr_pi_prot_verify, + &attr_enforce_pr_isids, + &attr_is_nonrot, + &attr_emulate_rest_reord, + &attr_force_pr_aptpl, + &attr_hw_block_size, + &attr_block_size, + &attr_hw_max_sectors, + &attr_optimal_sectors, + &attr_hw_queue_depth, + &attr_queue_depth, + &attr_max_unmap_lba_count, + &attr_max_unmap_block_desc_count, + &attr_unmap_granularity, + &attr_unmap_granularity_alignment, + &attr_unmap_zeroes_data, + &attr_max_write_same_len, + &attr_alua_support, + &attr_pgr_support, + &attr_emulate_rsoc, + NULL, +}; +EXPORT_SYMBOL(sbc_attrib_attrs); + +/* + * Minimal dev_attrib attributes for devices passing through CDBs. + * In this case we only provide a few read-only attributes for + * backwards compatibility. + */ +struct configfs_attribute *passthrough_attrib_attrs[] = { + &attr_hw_pi_prot_type, + &attr_hw_block_size, + &attr_hw_max_sectors, + &attr_hw_queue_depth, + &attr_emulate_pr, + &attr_alua_support, + &attr_pgr_support, + NULL, +}; +EXPORT_SYMBOL(passthrough_attrib_attrs); + +/* + * pr related dev_attrib attributes for devices passing through CDBs, + * but allowing in core pr emulation. + */ +struct configfs_attribute *passthrough_pr_attrib_attrs[] = { + &attr_enforce_pr_isids, + &attr_force_pr_aptpl, + NULL, +}; +EXPORT_SYMBOL(passthrough_pr_attrib_attrs); + +TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL); +TB_CIT_SETUP_DRV(dev_action, NULL, NULL); + +/* End functions for struct config_item_type tb_dev_attrib_cit */ + +/* Start functions for struct config_item_type tb_dev_wwn_cit */ + +static struct t10_wwn *to_t10_wwn(struct config_item *item) +{ + return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group); +} + +static ssize_t target_check_inquiry_data(char *buf) +{ + size_t len; + int i; + + len = strlen(buf); + + /* + * SPC 4.3.1: + * ASCII data fields shall contain only ASCII printable characters + * (i.e., code values 20h to 7Eh) and may be terminated with one or + * more ASCII null (00h) characters. + */ + for (i = 0; i < len; i++) { + if (buf[i] < 0x20 || buf[i] > 0x7E) { + pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n"); + return -EINVAL; + } + } + + return len; +} + +/* + * STANDARD and VPD page 0x83 T10 Vendor Identification + */ +static ssize_t target_wwn_vendor_id_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]); +} + +static ssize_t target_wwn_vendor_id_store(struct config_item *item, + const char *page, size_t count) +{ + struct t10_wwn *t10_wwn = to_t10_wwn(item); + struct se_device *dev = t10_wwn->t10_dev; + /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ + unsigned char buf[INQUIRY_VENDOR_LEN + 2]; + char *stripped = NULL; + ssize_t len; + ssize_t ret; + + len = strscpy(buf, page, sizeof(buf)); + if (len > 0) { + /* Strip any newline added from userspace. 
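+		 * (e.g. a trailing '\n' left by a hypothetical
+		 * echo LIO-ORG > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vendor_id)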
*/ + stripped = strstrip(buf); + len = strlen(stripped); + } + if (len < 0 || len > INQUIRY_VENDOR_LEN) { + pr_err("Emulated T10 Vendor Identification exceeds" + " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN) + "\n"); + return -EOVERFLOW; + } + + ret = target_check_inquiry_data(stripped); + + if (ret < 0) + return ret; + + /* + * Check to see if any active exports exist. If they do exist, fail + * here as changing this information on the fly (underneath the + * initiator side OS dependent multipath code) could cause negative + * effects. + */ + if (dev->export_count) { + pr_err("Unable to set T10 Vendor Identification while" + " active %d exports exist\n", dev->export_count); + return -EINVAL; + } + + BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1); + strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); + + pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:" + " %s\n", dev->t10_wwn.vendor); + + return count; +} + +static ssize_t target_wwn_product_id_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]); +} + +static ssize_t target_wwn_product_id_store(struct config_item *item, + const char *page, size_t count) +{ + struct t10_wwn *t10_wwn = to_t10_wwn(item); + struct se_device *dev = t10_wwn->t10_dev; + /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ + unsigned char buf[INQUIRY_MODEL_LEN + 2]; + char *stripped = NULL; + ssize_t len; + ssize_t ret; + + len = strscpy(buf, page, sizeof(buf)); + if (len > 0) { + /* Strip any newline added from userspace. */ + stripped = strstrip(buf); + len = strlen(stripped); + } + if (len < 0 || len > INQUIRY_MODEL_LEN) { + pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: " + __stringify(INQUIRY_MODEL_LEN) + "\n"); + return -EOVERFLOW; + } + + ret = target_check_inquiry_data(stripped); + + if (ret < 0) + return ret; + + /* + * Check to see if any active exports exist. If they do exist, fail + * here as changing this information on the fly (underneath the + * initiator side OS dependent multipath code) could cause negative + * effects. + */ + if (dev->export_count) { + pr_err("Unable to set T10 Model while active %d exports exist\n", + dev->export_count); + return -EINVAL; + } + + BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); + strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); + + pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", + dev->t10_wwn.model); + + return count; +} + +static ssize_t target_wwn_revision_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]); +} + +static ssize_t target_wwn_revision_store(struct config_item *item, + const char *page, size_t count) +{ + struct t10_wwn *t10_wwn = to_t10_wwn(item); + struct se_device *dev = t10_wwn->t10_dev; + /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ + unsigned char buf[INQUIRY_REVISION_LEN + 2]; + char *stripped = NULL; + ssize_t len; + ssize_t ret; + + len = strscpy(buf, page, sizeof(buf)); + if (len > 0) { + /* Strip any newline added from userspace. 
*/ + stripped = strstrip(buf); + len = strlen(stripped); + } + if (len < 0 || len > INQUIRY_REVISION_LEN) { + pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: " + __stringify(INQUIRY_REVISION_LEN) + "\n"); + return -EOVERFLOW; + } + + ret = target_check_inquiry_data(stripped); + + if (ret < 0) + return ret; + + /* + * Check to see if any active exports exist. If they do exist, fail + * here as changing this information on the fly (underneath the + * initiator side OS dependent multipath code) could cause negative + * effects. + */ + if (dev->export_count) { + pr_err("Unable to set T10 Revision while active %d exports exist\n", + dev->export_count); + return -EINVAL; + } + + BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); + strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); + + pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", + dev->t10_wwn.revision); + + return count; +} + +static ssize_t +target_wwn_company_id_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%#08x\n", + to_t10_wwn(item)->company_id); +} + +static ssize_t +target_wwn_company_id_store(struct config_item *item, + const char *page, size_t count) +{ + struct t10_wwn *t10_wwn = to_t10_wwn(item); + struct se_device *dev = t10_wwn->t10_dev; + u32 val; + int ret; + + /* + * The IEEE COMPANY_ID field should contain a 24-bit canonical + * form OUI assigned by the IEEE. + */ + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + if (val >= 0x1000000) + return -EOVERFLOW; + + /* + * Check to see if any active exports exist. If they do exist, fail + * here as changing this information on the fly (underneath the + * initiator side OS dependent multipath code) could cause negative + * effects. + */ + if (dev->export_count) { + pr_err("Unable to set Company ID while %u exports exist\n", + dev->export_count); + return -EINVAL; + } + + t10_wwn->company_id = val; + + pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n", + t10_wwn->company_id); + + return count; +} + +/* + * VPD page 0x80 Unit serial + */ +static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item, + char *page) +{ + return sprintf(page, "T10 VPD Unit Serial Number: %s\n", + &to_t10_wwn(item)->unit_serial[0]); +} + +static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item, + const char *page, size_t count) +{ + struct t10_wwn *t10_wwn = to_t10_wwn(item); + struct se_device *dev = t10_wwn->t10_dev; + unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { }; + + /* + * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial + * from the struct scsi_device level firmware, do not allow + * VPD Unit Serial to be emulated. + * + * Note this struct scsi_device could also be emulating VPD + * information from its drivers/scsi LLD. But for now we assume + * it is doing 'the right thing' wrt a world wide unique + * VPD Unit Serial Number that OS dependent multipath can depend on. + */ + if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) { + pr_err("Underlying SCSI device firmware provided VPD" + " Unit Serial, ignoring request\n"); + return -EOPNOTSUPP; + } + + if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { + pr_err("Emulated VPD Unit Serial exceeds" + " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); + return -EOVERFLOW; + } + /* + * Check to see if any active $FABRIC_MOD exports exist. 
If they + * do exist, fail here as changing this information on the fly + * (underneath the initiator side OS dependent multipath code) + * could cause negative effects. + */ + if (dev->export_count) { + pr_err("Unable to set VPD Unit Serial while" + " active %d $FABRIC_MOD exports exist\n", + dev->export_count); + return -EINVAL; + } + + /* + * This currently assumes ASCII encoding for emulated VPD Unit Serial. + * + * Also, strip any newline added from the userspace + * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial + */ + snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); + snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, + "%s", strstrip(buf)); + dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL; + + pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" + " %s\n", dev->t10_wwn.unit_serial); + + return count; +} + +/* + * VPD page 0x83 Protocol Identifier + */ +static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item, + char *page) +{ + struct t10_wwn *t10_wwn = to_t10_wwn(item); + struct t10_vpd *vpd; + unsigned char buf[VPD_TMP_BUF_SIZE] = { }; + ssize_t len = 0; + + spin_lock(&t10_wwn->t10_vpd_lock); + list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { + if (!vpd->protocol_identifier_set) + continue; + + transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); + + if (len + strlen(buf) >= PAGE_SIZE) + break; + + len += sprintf(page+len, "%s", buf); + } + spin_unlock(&t10_wwn->t10_vpd_lock); + + return len; +} + +/* + * Generic wrapper for dumping VPD identifiers by association. + */ +#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ +static ssize_t target_wwn_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct t10_wwn *t10_wwn = to_t10_wwn(item); \ + struct t10_vpd *vpd; \ + unsigned char buf[VPD_TMP_BUF_SIZE]; \ + ssize_t len = 0; \ + \ + spin_lock(&t10_wwn->t10_vpd_lock); \ + list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ + if (vpd->association != _assoc) \ + continue; \ + \ + memset(buf, 0, VPD_TMP_BUF_SIZE); \ + transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ + if (len + strlen(buf) >= PAGE_SIZE) \ + break; \ + len += sprintf(page+len, "%s", buf); \ + \ + memset(buf, 0, VPD_TMP_BUF_SIZE); \ + transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ + if (len + strlen(buf) >= PAGE_SIZE) \ + break; \ + len += sprintf(page+len, "%s", buf); \ + \ + memset(buf, 0, VPD_TMP_BUF_SIZE); \ + transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ + if (len + strlen(buf) >= PAGE_SIZE) \ + break; \ + len += sprintf(page+len, "%s", buf); \ + } \ + spin_unlock(&t10_wwn->t10_vpd_lock); \ + \ + return len; \ +} + +/* VPD page 0x83 Association: Logical Unit */ +DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); +/* VPD page 0x83 Association: Target Port */ +DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); +/* VPD page 0x83 Association: SCSI Target Device */ +DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); + +CONFIGFS_ATTR(target_wwn_, vendor_id); +CONFIGFS_ATTR(target_wwn_, product_id); +CONFIGFS_ATTR(target_wwn_, revision); +CONFIGFS_ATTR(target_wwn_, company_id); +CONFIGFS_ATTR(target_wwn_, vpd_unit_serial); +CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier); +CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit); +CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port); +CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device); + +static struct configfs_attribute *target_core_dev_wwn_attrs[] = { + &target_wwn_attr_vendor_id, + &target_wwn_attr_product_id, + 
&target_wwn_attr_revision, + &target_wwn_attr_company_id, + &target_wwn_attr_vpd_unit_serial, + &target_wwn_attr_vpd_protocol_identifier, + &target_wwn_attr_vpd_assoc_logical_unit, + &target_wwn_attr_vpd_assoc_target_port, + &target_wwn_attr_vpd_assoc_scsi_target_device, + NULL, +}; + +TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs); + +/* End functions for struct config_item_type tb_dev_wwn_cit */ + +/* Start functions for struct config_item_type tb_dev_pr_cit */ + +static struct se_device *pr_to_dev(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_device, + dev_pr_group); +} + +static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, + char *page) +{ + struct se_node_acl *se_nacl; + struct t10_pr_registration *pr_reg; + char i_buf[PR_REG_ISID_ID_LEN] = { }; + + pr_reg = dev->dev_pr_res_holder; + if (!pr_reg) + return sprintf(page, "No SPC-3 Reservation holder\n"); + + se_nacl = pr_reg->pr_reg_nacl; + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); + + return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", + se_nacl->se_tpg->se_tpg_tfo->fabric_name, + se_nacl->initiatorname, i_buf); +} + +static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev, + char *page) +{ + struct se_session *sess = dev->reservation_holder; + struct se_node_acl *se_nacl; + ssize_t len; + + if (sess) { + se_nacl = sess->se_node_acl; + len = sprintf(page, + "SPC-2 Reservation: %s Initiator: %s\n", + se_nacl->se_tpg->se_tpg_tfo->fabric_name, + se_nacl->initiatorname); + } else { + len = sprintf(page, "No SPC-2 Reservation holder\n"); + } + return len; +} + +static ssize_t target_pr_res_holder_show(struct config_item *item, char *page) +{ + struct se_device *dev = pr_to_dev(item); + int ret; + + if (!dev->dev_attrib.emulate_pr) + return sprintf(page, "SPC_RESERVATIONS_DISABLED\n"); + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) + return sprintf(page, "Passthrough\n"); + + spin_lock(&dev->dev_reservation_lock); + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) + ret = target_core_dev_pr_show_spc2_res(dev, page); + else + ret = target_core_dev_pr_show_spc3_res(dev, page); + spin_unlock(&dev->dev_reservation_lock); + return ret; +} + +static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item, + char *page) +{ + struct se_device *dev = pr_to_dev(item); + ssize_t len = 0; + + spin_lock(&dev->dev_reservation_lock); + if (!dev->dev_pr_res_holder) { + len = sprintf(page, "No SPC-3 Reservation holder\n"); + } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) { + len = sprintf(page, "SPC-3 Reservation: All Target" + " Ports registration\n"); + } else { + len = sprintf(page, "SPC-3 Reservation: Single" + " Target Port registration\n"); + } + + spin_unlock(&dev->dev_reservation_lock); + return len; +} + +static ssize_t target_pr_res_pr_generation_show(struct config_item *item, + char *page) +{ + return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation); +} + + +static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item, + char *page) +{ + struct se_device *dev = pr_to_dev(item); + struct se_node_acl *se_nacl; + struct se_portal_group *se_tpg; + struct t10_pr_registration *pr_reg; + const struct target_core_fabric_ops *tfo; + ssize_t len = 0; + + spin_lock(&dev->dev_reservation_lock); + pr_reg = dev->dev_pr_res_holder; + if (!pr_reg) { + len = sprintf(page, "No SPC-3 Reservation holder\n"); + goto out_unlock; + } + + se_nacl = pr_reg->pr_reg_nacl; + se_tpg = 
se_nacl->se_tpg; + tfo = se_tpg->se_tpg_tfo; + + len += sprintf(page+len, "SPC-3 Reservation: %s" + " Target Node Endpoint: %s\n", tfo->fabric_name, + tfo->tpg_get_wwn(se_tpg)); + len += sprintf(page+len, "SPC-3 Reservation: Relative Port" + " Identifier Tag: %hu %s Portal Group Tag: %hu" + " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi, + tfo->fabric_name, tfo->tpg_get_tag(se_tpg), + tfo->fabric_name, pr_reg->pr_aptpl_target_lun); + +out_unlock: + spin_unlock(&dev->dev_reservation_lock); + return len; +} + + +static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item, + char *page) +{ + struct se_device *dev = pr_to_dev(item); + const struct target_core_fabric_ops *tfo; + struct t10_pr_registration *pr_reg; + unsigned char buf[384]; + char i_buf[PR_REG_ISID_ID_LEN]; + ssize_t len = 0; + int reg_count = 0; + + len += sprintf(page+len, "SPC-3 PR Registrations:\n"); + + spin_lock(&dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, + pr_reg_list) { + + memset(buf, 0, 384); + memset(i_buf, 0, PR_REG_ISID_ID_LEN); + tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; + core_pr_dump_initiator_port(pr_reg, i_buf, + PR_REG_ISID_ID_LEN); + sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", + tfo->fabric_name, + pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key, + pr_reg->pr_res_generation); + + if (len + strlen(buf) >= PAGE_SIZE) + break; + + len += sprintf(page+len, "%s", buf); + reg_count++; + } + spin_unlock(&dev->t10_pr.registration_lock); + + if (!reg_count) + len += sprintf(page+len, "None\n"); + + return len; +} + +static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page) +{ + struct se_device *dev = pr_to_dev(item); + struct t10_pr_registration *pr_reg; + ssize_t len = 0; + + spin_lock(&dev->dev_reservation_lock); + pr_reg = dev->dev_pr_res_holder; + if (pr_reg) { + len = sprintf(page, "SPC-3 Reservation Type: %s\n", + core_scsi3_pr_dump_type(pr_reg->pr_res_type)); + } else { + len = sprintf(page, "No SPC-3 Reservation holder\n"); + } + + spin_unlock(&dev->dev_reservation_lock); + return len; +} + +static ssize_t target_pr_res_type_show(struct config_item *item, char *page) +{ + struct se_device *dev = pr_to_dev(item); + + if (!dev->dev_attrib.emulate_pr) + return sprintf(page, "SPC_RESERVATIONS_DISABLED\n"); + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) + return sprintf(page, "SPC_PASSTHROUGH\n"); + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) + return sprintf(page, "SPC2_RESERVATIONS\n"); + + return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); +} + +static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, + char *page) +{ + struct se_device *dev = pr_to_dev(item); + + if (!dev->dev_attrib.emulate_pr || + (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) + return 0; + + return sprintf(page, "APTPL Bit Status: %s\n", + (dev->t10_pr.pr_aptpl_active) ? 
"Activated" : "Disabled"); +} + +static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item, + char *page) +{ + struct se_device *dev = pr_to_dev(item); + + if (!dev->dev_attrib.emulate_pr || + (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) + return 0; + + return sprintf(page, "Ready to process PR APTPL metadata..\n"); +} + +enum { + Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, + Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, + Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, + Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err +}; + +static match_table_t tokens = { + {Opt_initiator_fabric, "initiator_fabric=%s"}, + {Opt_initiator_node, "initiator_node=%s"}, + {Opt_initiator_sid, "initiator_sid=%s"}, + {Opt_sa_res_key, "sa_res_key=%s"}, + {Opt_res_holder, "res_holder=%d"}, + {Opt_res_type, "res_type=%d"}, + {Opt_res_scope, "res_scope=%d"}, + {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, + {Opt_mapped_lun, "mapped_lun=%u"}, + {Opt_target_fabric, "target_fabric=%s"}, + {Opt_target_node, "target_node=%s"}, + {Opt_tpgt, "tpgt=%d"}, + {Opt_port_rtpi, "port_rtpi=%d"}, + {Opt_target_lun, "target_lun=%u"}, + {Opt_err, NULL} +}; + +static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_device *dev = pr_to_dev(item); + unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; + unsigned char *t_fabric = NULL, *t_port = NULL; + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + unsigned long long tmp_ll; + u64 sa_res_key = 0; + u64 mapped_lun = 0, target_lun = 0; + int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; + u16 tpgt = 0; + u8 type = 0; + + if (!dev->dev_attrib.emulate_pr || + (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) + return count; + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) + return count; + + if (dev->export_count) { + pr_debug("Unable to process APTPL metadata while" + " active fabric exports exist\n"); + return -EINVAL; + } + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_initiator_fabric: + i_fabric = match_strdup(args); + if (!i_fabric) { + ret = -ENOMEM; + goto out; + } + break; + case Opt_initiator_node: + i_port = match_strdup(args); + if (!i_port) { + ret = -ENOMEM; + goto out; + } + if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { + pr_err("APTPL metadata initiator_node=" + " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", + PR_APTPL_MAX_IPORT_LEN); + ret = -EINVAL; + break; + } + break; + case Opt_initiator_sid: + isid = match_strdup(args); + if (!isid) { + ret = -ENOMEM; + goto out; + } + if (strlen(isid) >= PR_REG_ISID_LEN) { + pr_err("APTPL metadata initiator_isid" + "= exceeds PR_REG_ISID_LEN: %d\n", + PR_REG_ISID_LEN); + ret = -EINVAL; + break; + } + break; + case Opt_sa_res_key: + ret = match_u64(args, &tmp_ll); + if (ret < 0) { + pr_err("kstrtoull() failed for sa_res_key=\n"); + goto out; + } + sa_res_key = (u64)tmp_ll; + break; + /* + * PR APTPL Metadata for Reservation + */ + case Opt_res_holder: + ret = match_int(args, &arg); + if (ret) + goto out; + res_holder = arg; + break; + case Opt_res_type: + ret = match_int(args, &arg); + if (ret) + goto out; + type = (u8)arg; + break; + case Opt_res_scope: + ret = match_int(args, &arg); + if (ret) + goto out; + break; + case Opt_res_all_tg_pt: + ret = match_int(args, 
&arg); + if (ret) + goto out; + all_tg_pt = (int)arg; + break; + case Opt_mapped_lun: + ret = match_u64(args, &tmp_ll); + if (ret) + goto out; + mapped_lun = (u64)tmp_ll; + break; + /* + * PR APTPL Metadata for Target Port + */ + case Opt_target_fabric: + t_fabric = match_strdup(args); + if (!t_fabric) { + ret = -ENOMEM; + goto out; + } + break; + case Opt_target_node: + t_port = match_strdup(args); + if (!t_port) { + ret = -ENOMEM; + goto out; + } + if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { + pr_err("APTPL metadata target_node=" + " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", + PR_APTPL_MAX_TPORT_LEN); + ret = -EINVAL; + break; + } + break; + case Opt_tpgt: + ret = match_int(args, &arg); + if (ret) + goto out; + tpgt = (u16)arg; + break; + case Opt_port_rtpi: + ret = match_int(args, &arg); + if (ret) + goto out; + break; + case Opt_target_lun: + ret = match_u64(args, &tmp_ll); + if (ret) + goto out; + target_lun = (u64)tmp_ll; + break; + default: + break; + } + } + + if (!i_port || !t_port || !sa_res_key) { + pr_err("Illegal parameters for APTPL registration\n"); + ret = -EINVAL; + goto out; + } + + if (res_holder && !(type)) { + pr_err("Illegal PR type: 0x%02x for reservation" + " holder\n", type); + ret = -EINVAL; + goto out; + } + + ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key, + i_port, isid, mapped_lun, t_port, tpgt, target_lun, + res_holder, all_tg_pt, type); +out: + kfree(i_fabric); + kfree(i_port); + kfree(isid); + kfree(t_fabric); + kfree(t_port); + kfree(orig); + return (ret == 0) ? count : ret; +} + + +CONFIGFS_ATTR_RO(target_pr_, res_holder); +CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts); +CONFIGFS_ATTR_RO(target_pr_, res_pr_generation); +CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port); +CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts); +CONFIGFS_ATTR_RO(target_pr_, res_pr_type); +CONFIGFS_ATTR_RO(target_pr_, res_type); +CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active); +CONFIGFS_ATTR(target_pr_, res_aptpl_metadata); + +static struct configfs_attribute *target_core_dev_pr_attrs[] = { + &target_pr_attr_res_holder, + &target_pr_attr_res_pr_all_tgt_pts, + &target_pr_attr_res_pr_generation, + &target_pr_attr_res_pr_holder_tg_port, + &target_pr_attr_res_pr_registered_i_pts, + &target_pr_attr_res_pr_type, + &target_pr_attr_res_type, + &target_pr_attr_res_aptpl_active, + &target_pr_attr_res_aptpl_metadata, + NULL, +}; + +TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs); + +/* End functions for struct config_item_type tb_dev_pr_cit */ + +/* Start functions for struct config_item_type tb_dev_cit */ + +static inline struct se_device *to_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_device, dev_group); +} + +static ssize_t target_dev_info_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_device(item); + int bl = 0; + ssize_t read_bytes = 0; + + transport_dump_dev_state(dev, page, &bl); + read_bytes += bl; + read_bytes += dev->transport->show_configfs_dev_params(dev, + page+read_bytes); + return read_bytes; +} + +static ssize_t target_dev_control_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_device *dev = to_device(item); + + return dev->transport->set_configfs_dev_params(dev, page, count); +} + +static ssize_t target_dev_alias_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_device(item); + + if (!(dev->dev_flags & DF_USING_ALIAS)) + return 0; + + return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); +} + 
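+/*
+ * Illustrative bring-up of a backend device through the control and
+ * enable attributes in this section: a sketch only, assuming an IBLOCK
+ * backend whose set_configfs_dev_params() hook accepts a udev_path=
+ * token (paths and device names hypothetical):
+ *
+ *	echo udev_path=/dev/sdb > $TARGET/$HBA/$STORAGE_OBJECT/control
+ *	echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/enable
+ *
+ * The write of "1" invokes target_configure_device(); a value that
+ * does not contain '1' is rejected with -EINVAL.
+ */
+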
+static ssize_t target_dev_alias_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_device *dev = to_device(item); + struct se_hba *hba = dev->se_hba; + ssize_t read_bytes; + + if (count > (SE_DEV_ALIAS_LEN-1)) { + pr_err("alias count: %d exceeds" + " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, + SE_DEV_ALIAS_LEN-1); + return -EINVAL; + } + + read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); + if (!read_bytes) + return -EINVAL; + if (dev->dev_alias[read_bytes - 1] == '\n') + dev->dev_alias[read_bytes - 1] = '\0'; + + dev->dev_flags |= DF_USING_ALIAS; + + pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&dev->dev_group.cg_item), + dev->dev_alias); + + return read_bytes; +} + +static ssize_t target_dev_udev_path_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_device(item); + + if (!(dev->dev_flags & DF_USING_UDEV_PATH)) + return 0; + + return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); +} + +static ssize_t target_dev_udev_path_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_device *dev = to_device(item); + struct se_hba *hba = dev->se_hba; + ssize_t read_bytes; + + if (count > (SE_UDEV_PATH_LEN-1)) { + pr_err("udev_path count: %d exceeds" + " SE_UDEV_PATH_LEN-1: %u\n", (int)count, + SE_UDEV_PATH_LEN-1); + return -EINVAL; + } + + read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN, + "%s", page); + if (!read_bytes) + return -EINVAL; + if (dev->udev_path[read_bytes - 1] == '\n') + dev->udev_path[read_bytes - 1] = '\0'; + + dev->dev_flags |= DF_USING_UDEV_PATH; + + pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&dev->dev_group.cg_item), + dev->udev_path); + + return read_bytes; +} + +static ssize_t target_dev_enable_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_device(item); + + return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev)); +} + +static ssize_t target_dev_enable_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_device *dev = to_device(item); + char *ptr; + int ret; + + ptr = strstr(page, "1"); + if (!ptr) { + pr_err("For dev_enable ops, only valid value" + " is \"1\"\n"); + return -EINVAL; + } + + ret = target_configure_device(dev); + if (ret) + return ret; + return count; +} + +static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_device(item); + struct config_item *lu_ci; + struct t10_alua_lu_gp *lu_gp; + struct t10_alua_lu_gp_member *lu_gp_mem; + ssize_t len = 0; + + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!lu_gp_mem) + return 0; + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if (lu_gp) { + lu_ci = &lu_gp->lu_gp_group.cg_item; + len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", + config_item_name(lu_ci), lu_gp->lu_gp_id); + } + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + return len; +} + +static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_device *dev = to_device(item); + struct se_hba *hba = dev->se_hba; + struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; + struct t10_alua_lu_gp_member *lu_gp_mem; + unsigned char buf[LU_GROUP_NAME_BUF] = { }; + int move = 0; + + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!lu_gp_mem) + return count; + + if (count > LU_GROUP_NAME_BUF) { + 
		pr_err("ALUA LU Group Alias too large!\n");
+		return -EINVAL;
+	}
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA logical unit alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_lu_gp_by_name() will increment reference to
+		 * struct t10_alua_lu_gp. This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+		 */
+		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+		if (!lu_gp_new)
+			return -ENODEV;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		/*
+		 * Clearing an existing lu_gp association, and replacing
+		 * with NULL
+		 */
+		if (!lu_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
+				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+				" %hu\n",
+				config_item_name(&hba->hba_group.cg_item),
+				config_item_name(&dev->dev_group.cg_item),
+				config_item_name(&lu_gp->lu_gp_group.cg_item),
+				lu_gp->lu_gp_id);
+
+			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of lu_gp_mem with lu_gp
+		 */
+		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+		move = 1;
+	}
+	/*
+	 * Associate lu_gp_mem with lu_gp_new.
+	 */
+	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+		" core/alua/lu_gps/%s, ID: %hu\n",
+		(move) ? "Moving" : "Adding",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&dev->dev_group.cg_item),
+		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+		lu_gp_new->lu_gp_id);
+
+	core_alua_put_lu_gp_from_name(lu_gp_new);
+	return count;
+}
+
+static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
+{
+	struct se_device *dev = to_device(item);
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *mem;
+	char *b = page;
+	int bl = 0;
+	char state;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+		bl += sprintf(b + bl, "%u %u\n",
+			dev->t10_alua.lba_map_segment_size,
+			dev->t10_alua.lba_map_segment_multiplier);
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+		bl += sprintf(b + bl, "%llu %llu",
+			map->lba_map_first_lba, map->lba_map_last_lba);
+		list_for_each_entry(mem, &map->lba_map_mem_list,
+				lba_map_mem_list) {
+			switch (mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+				state = 'O';
+				break;
+			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+				state = 'A';
+				break;
+			case ALUA_ACCESS_STATE_STANDBY:
+				state = 'S';
+				break;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				state = 'U';
+				break;
+			default:
+				state = '.';
+				break;
+			}
+			bl += sprintf(b + bl, " %d:%c",
+				mem->lba_map_mem_alua_pg_id, state);
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return bl;
+}
+
+static ssize_t target_dev_lba_map_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_device *dev = to_device(item);
+	struct t10_alua_lba_map *lba_map = NULL;
+	struct list_head lba_list;
+	char *map_entries, *orig, *ptr;
+	char state;
+	int pg_num = -1, pg;
+	int ret = 0, num = 0, pg_id, alua_state;
+	unsigned long start_lba = -1, end_lba = -1;
+	unsigned long segment_size = -1, segment_mult = -1;
+
+	orig = map_entries = kstrdup(page, GFP_KERNEL);
+	if (!map_entries)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&lba_list);
+	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+		if (!*ptr)
continue; + + if (num == 0) { + if (sscanf(ptr, "%lu %lu\n", + &segment_size, &segment_mult) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + num++; + continue; + } + if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { + pr_err("Invalid line %d\n", num); + ret = -EINVAL; + break; + } + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing end lba\n", num); + ret = -EINVAL; + break; + } + ptr++; + ptr = strchr(ptr, ' '); + if (!ptr) { + pr_err("Invalid line %d, missing state definitions\n", + num); + ret = -EINVAL; + break; + } + ptr++; + lba_map = core_alua_allocate_lba_map(&lba_list, + start_lba, end_lba); + if (IS_ERR(lba_map)) { + ret = PTR_ERR(lba_map); + break; + } + pg = 0; + while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { + switch (state) { + case 'O': + alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; + break; + case 'A': + alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; + break; + case 'S': + alua_state = ALUA_ACCESS_STATE_STANDBY; + break; + case 'U': + alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; + break; + default: + pr_err("Invalid ALUA state '%c'\n", state); + ret = -EINVAL; + goto out; + } + + ret = core_alua_allocate_lba_map_mem(lba_map, + pg_id, alua_state); + if (ret) { + pr_err("Invalid target descriptor %d:%c " + "at line %d\n", + pg_id, state, num); + break; + } + pg++; + ptr = strchr(ptr, ' '); + if (ptr) + ptr++; + else + break; + } + if (pg_num == -1) + pg_num = pg; + else if (pg != pg_num) { + pr_err("Only %d from %d port groups definitions " + "at line %d\n", pg, pg_num, num); + ret = -EINVAL; + break; + } + num++; + } +out: + if (ret) { + core_alua_free_lba_map(&lba_list); + count = ret; + } else + core_alua_set_lba_map(dev, &lba_list, + segment_size, segment_mult); + kfree(orig); + return count; +} + +CONFIGFS_ATTR_RO(target_dev_, info); +CONFIGFS_ATTR_WO(target_dev_, control); +CONFIGFS_ATTR(target_dev_, alias); +CONFIGFS_ATTR(target_dev_, udev_path); +CONFIGFS_ATTR(target_dev_, enable); +CONFIGFS_ATTR(target_dev_, alua_lu_gp); +CONFIGFS_ATTR(target_dev_, lba_map); + +static struct configfs_attribute *target_core_dev_attrs[] = { + &target_dev_attr_info, + &target_dev_attr_control, + &target_dev_attr_alias, + &target_dev_attr_udev_path, + &target_dev_attr_enable, + &target_dev_attr_alua_lu_gp, + &target_dev_attr_lba_map, + NULL, +}; + +static void target_core_dev_release(struct config_item *item) +{ + struct config_group *dev_cg = to_config_group(item); + struct se_device *dev = + container_of(dev_cg, struct se_device, dev_group); + + target_free_device(dev); +} + +/* + * Used in target_core_fabric_configfs.c to verify valid se_device symlink + * within target_fabric_port_link() + */ +struct configfs_item_operations target_core_dev_item_ops = { + .release = target_core_dev_release, +}; + +TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); + +/* End functions for struct config_item_type tb_dev_cit */ + +/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ + +static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item) +{ + return container_of(to_config_group(item), struct t10_alua_lu_gp, + lu_gp_group); +} + +static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page) +{ + struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); + + if (!lu_gp->lu_gp_valid_id) + return 0; + return sprintf(page, "%hu\n", lu_gp->lu_gp_id); +} + +static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item, + const char *page, size_t count) +{ + 
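+	/*
+	 * Sketch of activating a logical unit group created under
+	 * core/alua/lu_gps/ (group name illustrative); the group only
+	 * becomes usable once a 16-bit ID has been written:
+	 *
+	 *	mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
+	 *	echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
+	 *
+	 * Values above 0x0000ffff are rejected below.
+	 */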
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); + struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; + unsigned long lu_gp_id; + int ret; + + ret = kstrtoul(page, 0, &lu_gp_id); + if (ret < 0) { + pr_err("kstrtoul() returned %d for" + " lu_gp_id\n", ret); + return ret; + } + if (lu_gp_id > 0x0000ffff) { + pr_err("ALUA lu_gp_id: %lu exceeds maximum:" + " 0x0000ffff\n", lu_gp_id); + return -EINVAL; + } + + ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); + if (ret < 0) + return -EINVAL; + + pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" + " Group: core/alua/lu_gps/%s to ID: %hu\n", + config_item_name(&alua_lu_gp_cg->cg_item), + lu_gp->lu_gp_id); + + return count; +} + +static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) +{ + struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); + struct se_device *dev; + struct se_hba *hba; + struct t10_alua_lu_gp_member *lu_gp_mem; + ssize_t len = 0, cur_len; + unsigned char buf[LU_GROUP_NAME_BUF] = { }; + + spin_lock(&lu_gp->lu_gp_lock); + list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { + dev = lu_gp_mem->lu_gp_mem_dev; + hba = dev->se_hba; + + cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", + config_item_name(&hba->hba_group.cg_item), + config_item_name(&dev->dev_group.cg_item)); + cur_len++; /* Extra byte for NULL terminator */ + + if ((cur_len + len) > PAGE_SIZE) { + pr_warn("Ran out of lu_gp_show_attr" + "_members buffer\n"); + break; + } + memcpy(page+len, buf, cur_len); + len += cur_len; + } + spin_unlock(&lu_gp->lu_gp_lock); + + return len; +} + +CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); +CONFIGFS_ATTR_RO(target_lu_gp_, members); + +static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { + &target_lu_gp_attr_lu_gp_id, + &target_lu_gp_attr_members, + NULL, +}; + +static void target_core_alua_lu_gp_release(struct config_item *item) +{ + struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), + struct t10_alua_lu_gp, lu_gp_group); + + core_alua_free_lu_gp(lu_gp); +} + +static struct configfs_item_operations target_core_alua_lu_gp_ops = { + .release = target_core_alua_lu_gp_release, +}; + +static const struct config_item_type target_core_alua_lu_gp_cit = { + .ct_item_ops = &target_core_alua_lu_gp_ops, + .ct_attrs = target_core_alua_lu_gp_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_lu_gp_cit */ + +/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ + +static struct config_group *target_core_alua_create_lu_gp( + struct config_group *group, + const char *name) +{ + struct t10_alua_lu_gp *lu_gp; + struct config_group *alua_lu_gp_cg = NULL; + struct config_item *alua_lu_gp_ci = NULL; + + lu_gp = core_alua_allocate_lu_gp(name, 0); + if (IS_ERR(lu_gp)) + return NULL; + + alua_lu_gp_cg = &lu_gp->lu_gp_group; + alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; + + config_group_init_type_name(alua_lu_gp_cg, name, + &target_core_alua_lu_gp_cit); + + pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" + " Group: core/alua/lu_gps/%s\n", + config_item_name(alua_lu_gp_ci)); + + return alua_lu_gp_cg; + +} + +static void target_core_alua_drop_lu_gp( + struct config_group *group, + struct config_item *item) +{ + struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), + struct t10_alua_lu_gp, lu_gp_group); + + pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" + " Group: core/alua/lu_gps/%s, ID: %hu\n", + config_item_name(item), lu_gp->lu_gp_id); + /* + * core_alua_free_lu_gp() 
is called from target_core_alua_lu_gp_ops->release() + * -> target_core_alua_lu_gp_release() + */ + config_item_put(item); +} + +static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { + .make_group = &target_core_alua_create_lu_gp, + .drop_item = &target_core_alua_drop_lu_gp, +}; + +static const struct config_item_type target_core_alua_lu_gps_cit = { + .ct_item_ops = NULL, + .ct_group_ops = &target_core_alua_lu_gps_group_ops, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_lu_gps_cit */ + +/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ + +static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item) +{ + return container_of(to_config_group(item), struct t10_alua_tg_pt_gp, + tg_pt_gp_group); +} + +static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, + char *page) +{ + return sprintf(page, "%d\n", + to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); +} + +static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, + const char *page, size_t count) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + unsigned long tmp; + int new_state, ret; + + if (!tg_pt_gp->tg_pt_gp_valid_id) { + pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n"); + return -EINVAL; + } + if (!target_dev_configured(dev)) { + pr_err("Unable to set alua_access_state while device is" + " not configured\n"); + return -ENODEV; + } + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract new ALUA access state from" + " %s\n", page); + return ret; + } + new_state = (int)tmp; + + if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { + pr_err("Unable to process implicit configfs ALUA" + " transition while TPGS_IMPLICIT_ALUA is disabled\n"); + return -EINVAL; + } + if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && + new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { + /* LBA DEPENDENT is only allowed with implicit ALUA */ + pr_err("Unable to process implicit configfs ALUA transition" + " while explicit ALUA management is enabled\n"); + return -EINVAL; + } + + ret = core_alua_do_port_transition(tg_pt_gp, dev, + NULL, NULL, new_state, 0); + return (!ret) ? 
count : -EINVAL; +} + +static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item, + char *page) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); + return sprintf(page, "%s\n", + core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); +} + +static ssize_t target_tg_pt_gp_alua_access_status_store( + struct config_item *item, const char *page, size_t count) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); + unsigned long tmp; + int new_status, ret; + + if (!tg_pt_gp->tg_pt_gp_valid_id) { + pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n"); + return -EINVAL; + } + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract new ALUA access status" + " from %s\n", page); + return ret; + } + new_status = (int)tmp; + + if ((new_status != ALUA_STATUS_NONE) && + (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { + pr_err("Illegal ALUA access status: 0x%02x\n", + new_status); + return -EINVAL; + } + + tg_pt_gp->tg_pt_gp_alua_access_status = new_status; + return count; +} + +static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item, + char *page) +{ + return core_alua_show_access_type(to_tg_pt_gp(item), page); +} + +static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item, + const char *page, size_t count) +{ + return core_alua_store_access_type(to_tg_pt_gp(item), page, count); +} + +#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \ +static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \ + struct config_item *item, char *p) \ +{ \ + struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ + return sprintf(p, "%d\n", \ + !!(t->tg_pt_gp_alua_supported_states & _bit)); \ +} \ + \ +static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \ + struct config_item *item, const char *p, size_t c) \ +{ \ + struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ + unsigned long tmp; \ + int ret; \ + \ + if (!t->tg_pt_gp_valid_id) { \ + pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \ + return -EINVAL; \ + } \ + \ + ret = kstrtoul(p, 0, &tmp); \ + if (ret < 0) { \ + pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ + return -EINVAL; \ + } \ + if (tmp > 1) { \ + pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ + return -EINVAL; \ + } \ + if (tmp) \ + t->tg_pt_gp_alua_supported_states |= _bit; \ + else \ + t->tg_pt_gp_alua_supported_states &= ~_bit; \ + \ + return c; \ +} + +ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP); +ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP); +ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP); +ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP); +ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP); +ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP); +ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP); + +static ssize_t target_tg_pt_gp_alua_write_metadata_show( + struct config_item *item, char *page) +{ + return sprintf(page, "%d\n", + to_tg_pt_gp(item)->tg_pt_gp_write_metadata); +} + +static ssize_t target_tg_pt_gp_alua_write_metadata_store( + struct config_item *item, const char *page, size_t count) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract alua_write_metadata\n"); + return ret; + } + + if ((tmp != 0) && (tmp != 1)) { + pr_err("Illegal value for alua_write_metadata:" + " %lu\n", tmp); + return -EINVAL; + 
	}
+	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+	return count;
+}
+
+static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
+			count);
+}
+
+static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
+			count);
+}
+
+static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
+		struct config_item *item, char *page)
+{
+	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
+			count);
+}
+
+static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
+		char *page)
+{
+	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
+}
+
+static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
+}
+
+static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
+		char *page)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+
+	if (!tg_pt_gp->tg_pt_gp_valid_id)
+		return 0;
+	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	unsigned long tg_pt_gp_id;
+	int ret;
+
+	ret = kstrtoul(page, 0, &tg_pt_gp_id);
+	if (ret < 0) {
+		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
+			page);
+		return ret;
+	}
+	if (tg_pt_gp_id > 0x0000ffff) {
+		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
+			tg_pt_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
+		"core/alua/tg_pt_gps/%s to ID: %hu\n",
+		config_item_name(&alua_tg_pt_gp_cg->cg_item),
+		tg_pt_gp->tg_pt_gp_id);
+
+	return count;
+}
+
+static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
+		char *page)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+	struct se_lun *lun;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+			lun_tg_pt_gp_link) {
+		struct se_portal_group *tpg = lun->lun_tpg;
+
+		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+			"/%s\n", tpg->se_tpg_tfo->fabric_name,
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			pr_warn("Ran out of tg_pt_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
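+	/*
+	 * Each member line emitted above has the form
+	 * $FABRIC_NAME/$TPG_WWN/tpgt_$TAG/$LUN, e.g. (illustrative)
+	 * "iscsi/iqn.2003-01.org.linux-iscsi.tgt/tpgt_1/lun_0"; listing
+	 * stops once the next entry would overflow PAGE_SIZE.
+	 */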
spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + + return len; +} + +CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized); +CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata); +CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs); +CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs); +CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs); +CONFIGFS_ATTR(target_tg_pt_gp_, preferred); +CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id); +CONFIGFS_ATTR_RO(target_tg_pt_gp_, members); + +static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { + &target_tg_pt_gp_attr_alua_access_state, + &target_tg_pt_gp_attr_alua_access_status, + &target_tg_pt_gp_attr_alua_access_type, + &target_tg_pt_gp_attr_alua_support_transitioning, + &target_tg_pt_gp_attr_alua_support_offline, + &target_tg_pt_gp_attr_alua_support_lba_dependent, + &target_tg_pt_gp_attr_alua_support_unavailable, + &target_tg_pt_gp_attr_alua_support_standby, + &target_tg_pt_gp_attr_alua_support_active_nonoptimized, + &target_tg_pt_gp_attr_alua_support_active_optimized, + &target_tg_pt_gp_attr_alua_write_metadata, + &target_tg_pt_gp_attr_nonop_delay_msecs, + &target_tg_pt_gp_attr_trans_delay_msecs, + &target_tg_pt_gp_attr_implicit_trans_secs, + &target_tg_pt_gp_attr_preferred, + &target_tg_pt_gp_attr_tg_pt_gp_id, + &target_tg_pt_gp_attr_members, + NULL, +}; + +static void target_core_alua_tg_pt_gp_release(struct config_item *item) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), + struct t10_alua_tg_pt_gp, tg_pt_gp_group); + + core_alua_free_tg_pt_gp(tg_pt_gp); +} + +static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { + .release = target_core_alua_tg_pt_gp_release, +}; + +static const struct config_item_type target_core_alua_tg_pt_gp_cit = { + .ct_item_ops = &target_core_alua_tg_pt_gp_ops, + .ct_attrs = target_core_alua_tg_pt_gp_attrs, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ + +/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */ + +static struct config_group *target_core_alua_create_tg_pt_gp( + struct config_group *group, + const char *name) +{ + struct t10_alua *alua = container_of(group, struct t10_alua, + alua_tg_pt_gps_group); + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct config_group *alua_tg_pt_gp_cg = NULL; + struct config_item *alua_tg_pt_gp_ci = NULL; + + tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0); + if (!tg_pt_gp) + return NULL; + + alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; + alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item; + + config_group_init_type_name(alua_tg_pt_gp_cg, name, + &target_core_alua_tg_pt_gp_cit); + + pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" + " Group: alua/tg_pt_gps/%s\n", + config_item_name(alua_tg_pt_gp_ci)); + + return alua_tg_pt_gp_cg; +} + +static void target_core_alua_drop_tg_pt_gp( + struct config_group *group, + struct config_item *item) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = 
container_of(to_config_group(item), + struct t10_alua_tg_pt_gp, tg_pt_gp_group); + + pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" + " Group: alua/tg_pt_gps/%s, ID: %hu\n", + config_item_name(item), tg_pt_gp->tg_pt_gp_id); + /* + * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() + * -> target_core_alua_tg_pt_gp_release(). + */ + config_item_put(item); +} + +static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { + .make_group = &target_core_alua_create_tg_pt_gp, + .drop_item = &target_core_alua_drop_tg_pt_gp, +}; + +TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL); + +/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */ + +/* Start functions for struct config_item_type target_core_alua_cit */ + +/* + * target_core_alua_cit is a ConfigFS group that lives under + * /sys/kernel/config/target/core/alua. There are default groups + * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to + * target_core_alua_cit in target_core_init_configfs() below. + */ +static const struct config_item_type target_core_alua_cit = { + .ct_item_ops = NULL, + .ct_attrs = NULL, + .ct_owner = THIS_MODULE, +}; + +/* End functions for struct config_item_type target_core_alua_cit */ + +/* Start functions for struct config_item_type tb_dev_stat_cit */ + +static struct config_group *target_core_stat_mkdir( + struct config_group *group, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static void target_core_stat_rmdir( + struct config_group *group, + struct config_item *item) +{ + return; +} + +static struct configfs_group_operations target_core_stat_group_ops = { + .make_group = &target_core_stat_mkdir, + .drop_item = &target_core_stat_rmdir, +}; + +TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL); + +/* End functions for struct config_item_type tb_dev_stat_cit */ + +/* Start functions for struct config_item_type target_core_hba_cit */ + +static struct config_group *target_core_make_subdev( + struct config_group *group, + const char *name) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + struct config_item *hba_ci = &group->cg_item; + struct se_hba *hba = item_to_hba(hba_ci); + struct target_backend *tb = hba->backend; + struct se_device *dev; + int errno = -ENOMEM, ret; + + ret = mutex_lock_interruptible(&hba->hba_access_mutex); + if (ret) + return ERR_PTR(ret); + + dev = target_alloc_device(hba, name); + if (!dev) + goto out_unlock; + + config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit); + + config_group_init_type_name(&dev->dev_action_group, "action", + &tb->tb_dev_action_cit); + configfs_add_default_group(&dev->dev_action_group, &dev->dev_group); + + config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", + &tb->tb_dev_attrib_cit); + configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group); + + config_group_init_type_name(&dev->dev_pr_group, "pr", + &tb->tb_dev_pr_cit); + configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group); + + config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", + &tb->tb_dev_wwn_cit); + configfs_add_default_group(&dev->t10_wwn.t10_wwn_group, + &dev->dev_group); + + config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, + "alua", &tb->tb_dev_alua_tg_pt_gps_cit); + configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group, + &dev->dev_group); + + config_group_init_type_name(&dev->dev_stat_grps.stat_group, + "statistics", &tb->tb_dev_stat_cit); + 
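+	/*
+	 * A single userspace mkdir, e.g. (names illustrative)
+	 *
+	 *	mkdir /sys/kernel/config/target/core/$HBA/$DEV
+	 *
+	 * lands here and populates the action/, attrib/, pr/, wwn/, alua/
+	 * and statistics/ default groups set up above and below.
+	 */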
configfs_add_default_group(&dev->dev_stat_grps.stat_group, + &dev->dev_group); + + /* + * Add core/$HBA/$DEV/alua/default_tg_pt_gp + */ + tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1); + if (!tg_pt_gp) + goto out_free_device; + dev->t10_alua.default_tg_pt_gp = tg_pt_gp; + + config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, + "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); + configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group, + &dev->t10_alua.alua_tg_pt_gps_group); + + /* + * Add core/$HBA/$DEV/statistics/ default groups + */ + target_stat_setup_dev_default_groups(dev); + + mutex_lock(&target_devices_lock); + target_devices++; + mutex_unlock(&target_devices_lock); + + mutex_unlock(&hba->hba_access_mutex); + return &dev->dev_group; + +out_free_device: + target_free_device(dev); +out_unlock: + mutex_unlock(&hba->hba_access_mutex); + return ERR_PTR(errno); +} + +static void target_core_drop_subdev( + struct config_group *group, + struct config_item *item) +{ + struct config_group *dev_cg = to_config_group(item); + struct se_device *dev = + container_of(dev_cg, struct se_device, dev_group); + struct se_hba *hba; + + hba = item_to_hba(&dev->se_hba->hba_group.cg_item); + + mutex_lock(&hba->hba_access_mutex); + + configfs_remove_default_groups(&dev->dev_stat_grps.stat_group); + configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group); + + /* + * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp + * directly from target_core_alua_tg_pt_gp_release(). + */ + dev->t10_alua.default_tg_pt_gp = NULL; + + configfs_remove_default_groups(dev_cg); + + /* + * se_dev is released from target_core_dev_item_ops->release() + */ + config_item_put(item); + + mutex_lock(&target_devices_lock); + target_devices--; + mutex_unlock(&target_devices_lock); + + mutex_unlock(&hba->hba_access_mutex); +} + +static struct configfs_group_operations target_core_hba_group_ops = { + .make_group = target_core_make_subdev, + .drop_item = target_core_drop_subdev, +}; + + +static inline struct se_hba *to_hba(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_hba, hba_group); +} + +static ssize_t target_hba_info_show(struct config_item *item, char *page) +{ + struct se_hba *hba = to_hba(item); + + return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", + hba->hba_id, hba->backend->ops->name, + TARGET_CORE_VERSION); +} + +static ssize_t target_hba_mode_show(struct config_item *item, char *page) +{ + struct se_hba *hba = to_hba(item); + int hba_mode = 0; + + if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) + hba_mode = 1; + + return sprintf(page, "%d\n", hba_mode); +} + +static ssize_t target_hba_mode_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_hba *hba = to_hba(item); + unsigned long mode_flag; + int ret; + + if (hba->backend->ops->pmode_enable_hba == NULL) + return -EINVAL; + + ret = kstrtoul(page, 0, &mode_flag); + if (ret < 0) { + pr_err("Unable to extract hba mode flag: %d\n", ret); + return ret; + } + + if (hba->dev_count) { + pr_err("Unable to set hba_mode with active devices\n"); + return -EINVAL; + } + + ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag); + if (ret < 0) + return -EINVAL; + if (ret > 0) + hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; + else if (ret == 0) + hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; + + return count; +} + +CONFIGFS_ATTR_RO(target_, hba_info); +CONFIGFS_ATTR(target_, hba_mode); + +static void target_core_hba_release(struct config_item *item) +{ + struct se_hba *hba = 
container_of(to_config_group(item), + struct se_hba, hba_group); + core_delete_hba(hba); +} + +static struct configfs_attribute *target_core_hba_attrs[] = { + &target_attr_hba_info, + &target_attr_hba_mode, + NULL, +}; + +static struct configfs_item_operations target_core_hba_item_ops = { + .release = target_core_hba_release, +}; + +static const struct config_item_type target_core_hba_cit = { + .ct_item_ops = &target_core_hba_item_ops, + .ct_group_ops = &target_core_hba_group_ops, + .ct_attrs = target_core_hba_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *target_core_call_addhbatotarget( + struct config_group *group, + const char *name) +{ + char *se_plugin_str, *str, *str2; + struct se_hba *hba; + char buf[TARGET_CORE_NAME_MAX_LEN] = { }; + unsigned long plugin_dep_id = 0; + int ret; + + if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { + pr_err("Passed *name strlen(): %d exceeds" + " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), + TARGET_CORE_NAME_MAX_LEN); + return ERR_PTR(-ENAMETOOLONG); + } + snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); + + str = strstr(buf, "_"); + if (!str) { + pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); + return ERR_PTR(-EINVAL); + } + se_plugin_str = buf; + /* + * Special case for subsystem plugins that have "_" in their names. + * Namely rd_direct and rd_mcp.. + */ + str2 = strstr(str+1, "_"); + if (str2) { + *str2 = '\0'; /* Terminate for *se_plugin_str */ + str2++; /* Skip to start of plugin dependent ID */ + str = str2; + } else { + *str = '\0'; /* Terminate for *se_plugin_str */ + str++; /* Skip to start of plugin dependent ID */ + } + + ret = kstrtoul(str, 0, &plugin_dep_id); + if (ret < 0) { + pr_err("kstrtoul() returned %d for" + " plugin_dep_id\n", ret); + return ERR_PTR(ret); + } + /* + * Load up TCM subsystem plugins if they have not already been loaded. 
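+	 *
+	 * The group name itself encodes $SUBSYSTEM_PLUGIN_$HOST_ID as parsed
+	 * above: e.g. "iblock_0" selects plugin "iblock" with plugin dependent
+	 * ID 0, while "rd_mcp_3" hits the extra underscore special case and
+	 * parses as plugin "rd_mcp" with ID 3 (names and IDs illustrative).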
+ */ + transport_subsystem_check_init(); + + hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); + if (IS_ERR(hba)) + return ERR_CAST(hba); + + config_group_init_type_name(&hba->hba_group, name, + &target_core_hba_cit); + + return &hba->hba_group; +} + +static void target_core_call_delhbafromtarget( + struct config_group *group, + struct config_item *item) +{ + /* + * core_delete_hba() is called from target_core_hba_item_ops->release() + * -> target_core_hba_release() + */ + config_item_put(item); +} + +static struct configfs_group_operations target_core_group_ops = { + .make_group = target_core_call_addhbatotarget, + .drop_item = target_core_call_delhbafromtarget, +}; + +static const struct config_item_type target_core_cit = { + .ct_item_ops = NULL, + .ct_group_ops = &target_core_group_ops, + .ct_attrs = NULL, + .ct_owner = THIS_MODULE, +}; + +/* Stop functions for struct config_item_type target_core_hba_cit */ + +void target_setup_backend_cits(struct target_backend *tb) +{ + target_core_setup_dev_cit(tb); + target_core_setup_dev_action_cit(tb); + target_core_setup_dev_attrib_cit(tb); + target_core_setup_dev_pr_cit(tb); + target_core_setup_dev_wwn_cit(tb); + target_core_setup_dev_alua_tg_pt_gps_cit(tb); + target_core_setup_dev_stat_cit(tb); +} + +static void target_init_dbroot(void) +{ + struct file *fp; + + snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED); + fp = filp_open(db_root_stage, O_RDONLY, 0); + if (IS_ERR(fp)) { + pr_err("db_root: cannot open: %s\n", db_root_stage); + return; + } + if (!S_ISDIR(file_inode(fp)->i_mode)) { + filp_close(fp, NULL); + pr_err("db_root: not a valid directory: %s\n", db_root_stage); + return; + } + filp_close(fp, NULL); + + strncpy(db_root, db_root_stage, DB_ROOT_LEN); + pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root); +} + +static int __init target_core_init_configfs(void) +{ + struct configfs_subsystem *subsys = &target_core_fabrics; + struct t10_alua_lu_gp *lu_gp; + int ret; + + pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" + " Engine: %s on %s/%s on "UTS_RELEASE"\n", + TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); + + config_group_init(&subsys->su_group); + mutex_init(&subsys->su_mutex); + + ret = init_se_kmem_caches(); + if (ret < 0) + return ret; + /* + * Create $CONFIGFS/target/core default group for HBA <-> Storage Object + * and ALUA Logical Unit Group and Target Port Group infrastructure. + */ + config_group_init_type_name(&target_core_hbagroup, "core", + &target_core_cit); + configfs_add_default_group(&target_core_hbagroup, &subsys->su_group); + + /* + * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ + */ + config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit); + configfs_add_default_group(&alua_group, &target_core_hbagroup); + + /* + * Add ALUA Logical Unit Group and Target Port Group ConfigFS + * groups under /sys/kernel/config/target/core/alua/ + */ + config_group_init_type_name(&alua_lu_gps_group, "lu_gps", + &target_core_alua_lu_gps_cit); + configfs_add_default_group(&alua_lu_gps_group, &alua_group); + + /* + * Add core/alua/lu_gps/default_lu_gp + */ + lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); + if (IS_ERR(lu_gp)) { + ret = -ENOMEM; + goto out_global; + } + + config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", + &target_core_alua_lu_gp_cit); + configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group); + + default_lu_gp = lu_gp; + + /* + * Register the target_core_mod subsystem with configfs. 
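+	 *
+	 * Once registration succeeds, the fabric tree becomes visible to
+	 * userspace at /sys/kernel/config/target (assuming configfs is
+	 * mounted at the conventional /sys/kernel/config mount point).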
+ */ + ret = configfs_register_subsystem(subsys); + if (ret < 0) { + pr_err("Error %d while registering subsystem %s\n", + ret, subsys->su_group.cg_item.ci_namebuf); + goto out_global; + } + pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" + " Infrastructure: "TARGET_CORE_VERSION" on %s/%s" + " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); + /* + * Register built-in RAMDISK subsystem logic for virtual LUN 0 + */ + ret = rd_module_init(); + if (ret < 0) + goto out; + + ret = core_dev_setup_virtual_lun0(); + if (ret < 0) + goto out; + + ret = target_xcopy_setup_pt(); + if (ret < 0) + goto out; + + target_init_dbroot(); + + return 0; + +out: + configfs_unregister_subsystem(subsys); + core_dev_release_virtual_lun0(); + rd_module_exit(); +out_global: + if (default_lu_gp) { + core_alua_free_lu_gp(default_lu_gp); + default_lu_gp = NULL; + } + release_se_kmem_caches(); + return ret; +} + +static void __exit target_core_exit_configfs(void) +{ + configfs_remove_default_groups(&alua_lu_gps_group); + configfs_remove_default_groups(&alua_group); + configfs_remove_default_groups(&target_core_hbagroup); + + /* + * We expect subsys->su_group.default_groups to be released + * by configfs subsystem provider logic.. + */ + configfs_unregister_subsystem(&target_core_fabrics); + + core_alua_free_lu_gp(default_lu_gp); + default_lu_gp = NULL; + + pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" + " Infrastructure\n"); + + core_dev_release_virtual_lun0(); + rd_module_exit(); + target_xcopy_release_pt(); + release_se_kmem_caches(); +} + +MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(target_core_init_configfs); +module_exit(target_core_exit_configfs); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c new file mode 100644 index 0000000000..b6523d4b92 --- /dev/null +++ b/drivers/target/target_core_device.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_device.c (based on iscsi_target_device.c) + * + * This file contains the TCM Virtual Device and Disk Transport + * agnostic related functions. + * + * (c) Copyright 2003-2013 Datera, Inc. + * + * Nicholas A. 
Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/net.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/kthread.h> +#include <linux/in.h> +#include <linux/export.h> +#include <linux/t10-pi.h> +#include <asm/unaligned.h> +#include <net/sock.h> +#include <net/tcp.h> +#include <scsi/scsi_common.h> +#include <scsi/scsi_proto.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +static DEFINE_MUTEX(device_mutex); +static LIST_HEAD(device_list); +static DEFINE_IDR(devices_idr); + +static struct se_hba *lun0_hba; +/* not static, needed by tpg.c */ +struct se_device *g_lun0_dev; + +sense_reason_t +transport_lookup_cmd_lun(struct se_cmd *se_cmd) +{ + struct se_lun *se_lun = NULL; + struct se_session *se_sess = se_cmd->se_sess; + struct se_node_acl *nacl = se_sess->se_node_acl; + struct se_dev_entry *deve; + sense_reason_t ret = TCM_NO_SENSE; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); + if (deve) { + atomic_long_inc(&deve->total_cmds); + + if (se_cmd->data_direction == DMA_TO_DEVICE) + atomic_long_add(se_cmd->data_length, + &deve->write_bytes); + else if (se_cmd->data_direction == DMA_FROM_DEVICE) + atomic_long_add(se_cmd->data_length, + &deve->read_bytes); + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + deve->lun_access_ro) { + pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08llx\n", + se_cmd->se_tfo->fabric_name, + se_cmd->orig_fe_lun); + rcu_read_unlock(); + return TCM_WRITE_PROTECTED; + } + + se_lun = deve->se_lun; + + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { + se_lun = NULL; + goto out_unlock; + } + + se_cmd->se_lun = se_lun; + se_cmd->pr_res_key = deve->pr_res_key; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + se_cmd->lun_ref_active = true; + } +out_unlock: + rcu_read_unlock(); + + if (!se_lun) { + /* + * Use the se_portal_group->tpg_virt_lun0 to allow for + * REPORT_LUNS, et al to be returned when no active + * MappedLUN=0 exists for this Initiator Port. 
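+		 *
+		 * This keeps LUN scanning (REPORT LUNS, INQUIRY, ...)
+		 * working before any real LUN has been mapped, while the
+		 * WRITE_PROTECT check below ensures nothing can be
+		 * written through the dummy LUN.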
+ */ + if (se_cmd->orig_fe_lun != 0) { + pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + " Access for 0x%08llx from %s\n", + se_cmd->se_tfo->fabric_name, + se_cmd->orig_fe_lun, + nacl->initiatorname); + return TCM_NON_EXISTENT_LUN; + } + + /* + * Force WRITE PROTECT for virtual LUN 0 + */ + if ((se_cmd->data_direction != DMA_FROM_DEVICE) && + (se_cmd->data_direction != DMA_NONE)) + return TCM_WRITE_PROTECTED; + + se_lun = se_sess->se_tpg->tpg_virt_lun0; + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) + return TCM_NON_EXISTENT_LUN; + + se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + se_cmd->lun_ref_active = true; + } + /* + * RCU reference protected by percpu se_lun->lun_ref taken above that + * must drop to zero (including initial reference) before this se_lun + * pointer can be kfree_rcu() by the final se_lun->lun_group put via + * target_core_fabric_configfs.c:target_fabric_port_release + */ + se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); + atomic_long_inc(&se_cmd->se_dev->num_cmds); + + if (se_cmd->data_direction == DMA_TO_DEVICE) + atomic_long_add(se_cmd->data_length, + &se_cmd->se_dev->write_bytes); + else if (se_cmd->data_direction == DMA_FROM_DEVICE) + atomic_long_add(se_cmd->data_length, + &se_cmd->se_dev->read_bytes); + + return ret; +} +EXPORT_SYMBOL(transport_lookup_cmd_lun); + +int transport_lookup_tmr_lun(struct se_cmd *se_cmd) +{ + struct se_dev_entry *deve; + struct se_lun *se_lun = NULL; + struct se_session *se_sess = se_cmd->se_sess; + struct se_node_acl *nacl = se_sess->se_node_acl; + struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; + unsigned long flags; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); + if (deve) { + se_lun = deve->se_lun; + + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { + se_lun = NULL; + goto out_unlock; + } + + se_cmd->se_lun = se_lun; + se_cmd->pr_res_key = deve->pr_res_key; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + se_cmd->lun_ref_active = true; + } +out_unlock: + rcu_read_unlock(); + + if (!se_lun) { + pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + " Access for 0x%08llx for %s\n", + se_cmd->se_tfo->fabric_name, + se_cmd->orig_fe_lun, + nacl->initiatorname); + return -ENODEV; + } + se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); + se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); + + spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); + list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); + spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); + + return 0; +} +EXPORT_SYMBOL(transport_lookup_tmr_lun); + +bool target_lun_is_rdonly(struct se_cmd *cmd) +{ + struct se_session *se_sess = cmd->se_sess; + struct se_dev_entry *deve; + bool ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun); + ret = deve && deve->lun_access_ro; + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL(target_lun_is_rdonly); + +/* + * This function is called from core_scsi3_emulate_pro_register_and_move() + * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref + * when a matching rtpi is found. 
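+ * Callers are responsible for dropping the reference again via
+ * kref_put(&deve->pr_kref, target_pr_kref_release) once they are
+ * done with the returned entry.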
+ */ +struct se_dev_entry *core_get_se_deve_from_rtpi( + struct se_node_acl *nacl, + u16 rtpi) +{ + struct se_dev_entry *deve; + struct se_lun *lun; + struct se_portal_group *tpg = nacl->se_tpg; + + rcu_read_lock(); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { + lun = deve->se_lun; + if (!lun) { + pr_err("%s device entries device pointer is" + " NULL, but Initiator has access.\n", + tpg->se_tpg_tfo->fabric_name); + continue; + } + if (lun->lun_tpg->tpg_rtpi != rtpi) + continue; + + kref_get(&deve->pr_kref); + rcu_read_unlock(); + + return deve; + } + rcu_read_unlock(); + + return NULL; +} + +void core_free_device_list_for_node( + struct se_node_acl *nacl, + struct se_portal_group *tpg) +{ + struct se_dev_entry *deve; + + mutex_lock(&nacl->lun_entry_mutex); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) + core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg); + mutex_unlock(&nacl->lun_entry_mutex); +} + +void core_update_device_list_access( + u64 mapped_lun, + bool lun_access_ro, + struct se_node_acl *nacl) +{ + struct se_dev_entry *deve; + + mutex_lock(&nacl->lun_entry_mutex); + deve = target_nacl_find_deve(nacl, mapped_lun); + if (deve) + deve->lun_access_ro = lun_access_ro; + mutex_unlock(&nacl->lun_entry_mutex); +} + +/* + * Called with rcu_read_lock or nacl->device_list_lock held. + */ +struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun) +{ + struct se_dev_entry *deve; + + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) + if (deve->mapped_lun == mapped_lun) + return deve; + + return NULL; +} +EXPORT_SYMBOL(target_nacl_find_deve); + +void target_pr_kref_release(struct kref *kref) +{ + struct se_dev_entry *deve = container_of(kref, struct se_dev_entry, + pr_kref); + complete(&deve->pr_comp); +} + +/* + * Establish UA condition on SCSI device - all LUNs + */ +void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq) +{ + struct se_dev_entry *se_deve; + struct se_lun *lun; + + spin_lock(&dev->se_port_lock); + list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) { + + spin_lock(&lun->lun_deve_lock); + list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) + core_scsi3_ua_allocate(se_deve, asc, ascq); + spin_unlock(&lun->lun_deve_lock); + } + spin_unlock(&dev->se_port_lock); +} + +static void +target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new, + bool skip_new) +{ + struct se_dev_entry *tmp; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) { + if (skip_new && tmp == new) + continue; + core_scsi3_ua_allocate(tmp, 0x3F, + ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED); + } + rcu_read_unlock(); +} + +int core_enable_device_list_for_node( + struct se_lun *lun, + struct se_lun_acl *lun_acl, + u64 mapped_lun, + bool lun_access_ro, + struct se_node_acl *nacl, + struct se_portal_group *tpg) +{ + struct se_dev_entry *orig, *new; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) { + pr_err("Unable to allocate se_dev_entry memory\n"); + return -ENOMEM; + } + + spin_lock_init(&new->ua_lock); + INIT_LIST_HEAD(&new->ua_list); + INIT_LIST_HEAD(&new->lun_link); + + new->mapped_lun = mapped_lun; + kref_init(&new->pr_kref); + init_completion(&new->pr_comp); + + new->lun_access_ro = lun_access_ro; + new->creation_time = get_jiffies_64(); + new->attach_count++; + + mutex_lock(&nacl->lun_entry_mutex); + orig = target_nacl_find_deve(nacl, mapped_lun); + if (orig && orig->se_lun) { + struct se_lun *orig_lun = orig->se_lun; + + if (orig_lun 
!= lun) { + pr_err("Existing orig->se_lun doesn't match new lun" + " for dynamic -> explicit NodeACL conversion:" + " %s\n", nacl->initiatorname); + mutex_unlock(&nacl->lun_entry_mutex); + kfree(new); + return -EINVAL; + } + if (orig->se_lun_acl != NULL) { + pr_warn_ratelimited("Detected existing explicit" + " se_lun_acl->se_lun_group reference for %s" + " mapped_lun: %llu, failing\n", + nacl->initiatorname, mapped_lun); + mutex_unlock(&nacl->lun_entry_mutex); + kfree(new); + return -EINVAL; + } + + new->se_lun = lun; + new->se_lun_acl = lun_acl; + hlist_del_rcu(&orig->link); + hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); + mutex_unlock(&nacl->lun_entry_mutex); + + spin_lock(&lun->lun_deve_lock); + list_del(&orig->lun_link); + list_add_tail(&new->lun_link, &lun->lun_deve_list); + spin_unlock(&lun->lun_deve_lock); + + kref_put(&orig->pr_kref, target_pr_kref_release); + wait_for_completion(&orig->pr_comp); + + target_luns_data_has_changed(nacl, new, true); + kfree_rcu(orig, rcu_head); + return 0; + } + + new->se_lun = lun; + new->se_lun_acl = lun_acl; + hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); + mutex_unlock(&nacl->lun_entry_mutex); + + spin_lock(&lun->lun_deve_lock); + list_add_tail(&new->lun_link, &lun->lun_deve_list); + spin_unlock(&lun->lun_deve_lock); + + target_luns_data_has_changed(nacl, new, true); + return 0; +} + +void core_disable_device_list_for_node( + struct se_lun *lun, + struct se_dev_entry *orig, + struct se_node_acl *nacl, + struct se_portal_group *tpg) +{ + /* + * rcu_dereference_raw protected by se_lun->lun_group symlink + * reference to se_device->dev_group. + */ + struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + + lockdep_assert_held(&nacl->lun_entry_mutex); + + /* + * If the MappedLUN entry is being disabled, the entry in + * lun->lun_deve_list must be removed now before clearing the + * struct se_dev_entry pointers below as logic in + * core_alua_do_transition_tg_pt() depends on these being present. + * + * deve->se_lun_acl will be NULL for demo-mode created LUNs + * that have not been explicitly converted to MappedLUNs -> + * struct se_lun_acl, but we remove deve->lun_link from + * lun->lun_deve_list. This also means that active UAs and + * NodeACL context specific PR metadata for demo-mode + * MappedLUN *deve will be released below.. + */ + spin_lock(&lun->lun_deve_lock); + list_del(&orig->lun_link); + spin_unlock(&lun->lun_deve_lock); + /* + * Disable struct se_dev_entry LUN ACL mapping + */ + core_scsi3_ua_release_all(orig); + + hlist_del_rcu(&orig->link); + clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); + orig->lun_access_ro = false; + orig->creation_time = 0; + orig->attach_count--; + /* + * Before firing off RCU callback, wait for any in process SPEC_I_PT=1 + * or REGISTER_AND_MOVE PR operation to complete. 
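+	 * The kref_put() below drops the initial reference; once the last
+	 * holder releases &orig->pr_kref, target_pr_kref_release() fires
+	 * &orig->pr_comp and the wait_for_completion() returns.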
+ */ + kref_put(&orig->pr_kref, target_pr_kref_release); + wait_for_completion(&orig->pr_comp); + + kfree_rcu(orig, rcu_head); + + core_scsi3_free_pr_reg_from_nacl(dev, nacl); + target_luns_data_has_changed(nacl, NULL, false); +} + +/* core_clear_lun_from_tpg(): + * + * + */ +void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) +{ + struct se_node_acl *nacl; + struct se_dev_entry *deve; + + mutex_lock(&tpg->acl_node_mutex); + list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { + + mutex_lock(&nacl->lun_entry_mutex); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { + if (lun != deve->se_lun) + continue; + + core_disable_device_list_for_node(lun, deve, nacl, tpg); + } + mutex_unlock(&nacl->lun_entry_mutex); + } + mutex_unlock(&tpg->acl_node_mutex); +} + +static void se_release_vpd_for_dev(struct se_device *dev) +{ + struct t10_vpd *vpd, *vpd_tmp; + + spin_lock(&dev->t10_wwn.t10_vpd_lock); + list_for_each_entry_safe(vpd, vpd_tmp, + &dev->t10_wwn.t10_vpd_list, vpd_list) { + list_del(&vpd->vpd_list); + kfree(vpd); + } + spin_unlock(&dev->t10_wwn.t10_vpd_lock); +} + +static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) +{ + u32 aligned_max_sectors; + u32 alignment; + /* + * Limit max_sectors to a PAGE_SIZE aligned value for modern + * transport_allocate_data_tasks() operation. + */ + alignment = max(1ul, PAGE_SIZE / block_size); + aligned_max_sectors = rounddown(max_sectors, alignment); + + if (max_sectors != aligned_max_sectors) + pr_info("Rounding down aligned max_sectors from %u to %u\n", + max_sectors, aligned_max_sectors); + + return aligned_max_sectors; +} + +int core_dev_add_lun( + struct se_portal_group *tpg, + struct se_device *dev, + struct se_lun *lun) +{ + int rc; + + rc = core_tpg_add_lun(tpg, lun, false, dev); + if (rc < 0) + return rc; + + pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from" + " CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id); + /* + * Update LUN maps for dynamically added initiators when + * generate_node_acl is enabled. 
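+	 * Initiators gated by demo_mode_login_only are skipped here;
+	 * they pick up the new LUN at their next login instead.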
+ */ + if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { + struct se_node_acl *acl; + + mutex_lock(&tpg->acl_node_mutex); + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (acl->dynamic_node_acl && + (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || + !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { + core_tpg_add_node_to_devs(acl, tpg, lun); + } + } + mutex_unlock(&tpg->acl_node_mutex); + } + + return 0; +} + +/* core_dev_del_lun(): + * + * + */ +void core_dev_del_lun( + struct se_portal_group *tpg, + struct se_lun *lun) +{ + pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from" + " device object\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->fabric_name); + + core_tpg_remove_lun(tpg, lun); +} + +struct se_lun_acl *core_dev_init_initiator_node_lun_acl( + struct se_portal_group *tpg, + struct se_node_acl *nacl, + u64 mapped_lun, + int *ret) +{ + struct se_lun_acl *lacl; + + if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { + pr_err("%s InitiatorName exceeds maximum size.\n", + tpg->se_tpg_tfo->fabric_name); + *ret = -EOVERFLOW; + return NULL; + } + lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); + if (!lacl) { + pr_err("Unable to allocate memory for struct se_lun_acl.\n"); + *ret = -ENOMEM; + return NULL; + } + + lacl->mapped_lun = mapped_lun; + lacl->se_lun_nacl = nacl; + + return lacl; +} + +int core_dev_add_initiator_node_lun_acl( + struct se_portal_group *tpg, + struct se_lun_acl *lacl, + struct se_lun *lun, + bool lun_access_ro) +{ + struct se_node_acl *nacl = lacl->se_lun_nacl; + /* + * rcu_dereference_raw protected by se_lun->lun_group symlink + * reference to se_device->dev_group. + */ + struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + + if (!nacl) + return -EINVAL; + + if (lun->lun_access_ro) + lun_access_ro = true; + + lacl->se_lun = lun; + + if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, + lun_access_ro, nacl, tpg) < 0) + return -EINVAL; + + pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for " + " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun, + lun_access_ro ? "RO" : "RW", + nacl->initiatorname); + /* + * Check to see if there are any existing persistent reservation APTPL + * pre-registrations that need to be enabled for this LUN ACL.. 
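+	 * i.e. APTPL registrations loaded earlier via the device's
+	 * pr/res_aptpl_metadata attribute and parked on
+	 * t10_pr.aptpl_reg_list are activated now that a matching
+	 * MappedLUN exists.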
+ */ + core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl, + lacl->mapped_lun); + return 0; +} + +int core_dev_del_initiator_node_lun_acl( + struct se_lun *lun, + struct se_lun_acl *lacl) +{ + struct se_portal_group *tpg = lun->lun_tpg; + struct se_node_acl *nacl; + struct se_dev_entry *deve; + + nacl = lacl->se_lun_nacl; + if (!nacl) + return -EINVAL; + + mutex_lock(&nacl->lun_entry_mutex); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (deve) + core_disable_device_list_for_node(lun, deve, nacl, tpg); + mutex_unlock(&nacl->lun_entry_mutex); + + pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for" + " InitiatorNode: %s Mapped LUN: %llu\n", + tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, + nacl->initiatorname, lacl->mapped_lun); + + return 0; +} + +void core_dev_free_initiator_node_lun_acl( + struct se_portal_group *tpg, + struct se_lun_acl *lacl) +{ + pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" + " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), + tpg->se_tpg_tfo->fabric_name, + lacl->se_lun_nacl->initiatorname, lacl->mapped_lun); + + kfree(lacl); +} + +static void scsi_dump_inquiry(struct se_device *dev) +{ + struct t10_wwn *wwn = &dev->t10_wwn; + int device_type = dev->transport->get_device_type(dev); + + /* + * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer + */ + pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n", + wwn->vendor); + pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n", + wwn->model); + pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n", + wwn->revision); + pr_debug(" Type: %s ", scsi_device_type(device_type)); +} + +struct se_device *target_alloc_device(struct se_hba *hba, const char *name) +{ + struct se_device *dev; + struct se_lun *xcopy_lun; + int i; + + dev = hba->backend->ops->alloc_device(hba, name); + if (!dev) + return NULL; + + dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); + if (!dev->queues) { + dev->transport->free_device(dev); + return NULL; + } + + dev->queue_cnt = nr_cpu_ids; + for (i = 0; i < dev->queue_cnt; i++) { + struct se_device_queue *q; + + q = &dev->queues[i]; + INIT_LIST_HEAD(&q->state_list); + spin_lock_init(&q->lock); + + init_llist_head(&q->sq.cmd_list); + INIT_WORK(&q->sq.work, target_queued_submit_work); + } + + dev->se_hba = hba; + dev->transport = hba->backend->ops; + dev->transport_flags = dev->transport->transport_flags_default; + dev->prot_length = sizeof(struct t10_pi_tuple); + dev->hba_index = hba->hba_index; + + INIT_LIST_HEAD(&dev->dev_sep_list); + INIT_LIST_HEAD(&dev->dev_tmr_list); + INIT_LIST_HEAD(&dev->delayed_cmd_list); + INIT_LIST_HEAD(&dev->qf_cmd_list); + spin_lock_init(&dev->delayed_cmd_lock); + spin_lock_init(&dev->dev_reservation_lock); + spin_lock_init(&dev->se_port_lock); + spin_lock_init(&dev->se_tmr_lock); + spin_lock_init(&dev->qf_cmd_lock); + sema_init(&dev->caw_sem, 1); + INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); + spin_lock_init(&dev->t10_wwn.t10_vpd_lock); + INIT_LIST_HEAD(&dev->t10_pr.registration_list); + INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); + spin_lock_init(&dev->t10_pr.registration_lock); + spin_lock_init(&dev->t10_pr.aptpl_reg_lock); + INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); + spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); + INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); + spin_lock_init(&dev->t10_alua.lba_map_lock); + + INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); + 
mutex_init(&dev->lun_reset_mutex); + + dev->t10_wwn.t10_dev = dev; + /* + * Use OpenFabrics IEEE Company ID: 00 14 05 + */ + dev->t10_wwn.company_id = 0x001405; + + dev->t10_alua.t10_dev = dev; + + dev->dev_attrib.da_dev = dev; + dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; + dev->dev_attrib.emulate_dpo = 1; + dev->dev_attrib.emulate_fua_write = 1; + dev->dev_attrib.emulate_fua_read = 1; + dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; + dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR; + dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; + dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; + dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; + dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; + dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; + dev->dev_attrib.emulate_pr = DA_EMULATE_PR; + dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC; + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; + dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; + dev->dev_attrib.is_nonrot = DA_IS_NONROT; + dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; + dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; + dev->dev_attrib.max_unmap_block_desc_count = + DA_MAX_UNMAP_BLOCK_DESC_COUNT; + dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; + dev->dev_attrib.unmap_granularity_alignment = + DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; + dev->dev_attrib.unmap_zeroes_data = + DA_UNMAP_ZEROES_DATA_DEFAULT; + dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; + + xcopy_lun = &dev->xcopy_lun; + rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); + init_completion(&xcopy_lun->lun_shutdown_comp); + INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); + INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); + mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); + xcopy_lun->lun_tpg = &xcopy_pt_tpg; + + /* Preload the default INQUIRY const values */ + strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, + sizeof(dev->t10_wwn.model)); + strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, + sizeof(dev->t10_wwn.revision)); + + return dev; +} + +/* + * Check if the underlying struct block_device supports discard and if yes + * configure the UNMAP parameters. + */ +bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, + struct block_device *bdev) +{ + int block_size = bdev_logical_block_size(bdev); + + if (!bdev_max_discard_sectors(bdev)) + return false; + + attrib->max_unmap_lba_count = + bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9); + /* + * Currently hardcoded to 1 in Linux/SCSI code.. + */ + attrib->max_unmap_block_desc_count = 1; + attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size; + attrib->unmap_granularity_alignment = + bdev_discard_alignment(bdev) / block_size; + return true; +} +EXPORT_SYMBOL(target_configure_unmap_from_queue); + +/* + * Convert from blocksize advertised to the initiator to the 512 byte + * units unconditionally used by the Linux block layer. 
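+ *
+ * e.g. with a 4096 byte advertised block size, LBA 10 becomes
+ * 512-byte sector 80 (10 << 3); with a 512 byte block size the LBA
+ * passes through unchanged.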
+ */ +sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) +{ + switch (dev->dev_attrib.block_size) { + case 4096: + return lb << 3; + case 2048: + return lb << 2; + case 1024: + return lb << 1; + default: + return lb; + } +} +EXPORT_SYMBOL(target_to_linux_sector); + +struct devices_idr_iter { + int (*fn)(struct se_device *dev, void *data); + void *data; +}; + +static int target_devices_idr_iter(int id, void *p, void *data) + __must_hold(&device_mutex) +{ + struct devices_idr_iter *iter = data; + struct se_device *dev = p; + struct config_item *item; + int ret; + + /* + * We add the device early to the idr, so it can be used + * by backend modules during configuration. We do not want + * to allow other callers to access partially setup devices, + * so we skip them here. + */ + if (!target_dev_configured(dev)) + return 0; + + item = config_item_get_unless_zero(&dev->dev_group.cg_item); + if (!item) + return 0; + mutex_unlock(&device_mutex); + + ret = iter->fn(dev, iter->data); + config_item_put(item); + + mutex_lock(&device_mutex); + return ret; +} + +/** + * target_for_each_device - iterate over configured devices + * @fn: iterator function + * @data: pointer to data that will be passed to fn + * + * fn must return 0 to continue looping over devices. non-zero will break + * from the loop and return that value to the caller. + */ +int target_for_each_device(int (*fn)(struct se_device *dev, void *data), + void *data) +{ + struct devices_idr_iter iter = { .fn = fn, .data = data }; + int ret; + + mutex_lock(&device_mutex); + ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); + mutex_unlock(&device_mutex); + return ret; +} + +int target_configure_device(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + int ret, id; + + if (target_dev_configured(dev)) { + pr_err("se_dev->se_dev_ptr already set for storage" + " object\n"); + return -EEXIST; + } + + /* + * Add early so modules like tcmu can use during its + * configuration. + */ + mutex_lock(&device_mutex); + /* + * Use cyclic to try and avoid collisions with devices + * that were recently removed. + */ + id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL); + mutex_unlock(&device_mutex); + if (id < 0) { + ret = -ENOMEM; + goto out; + } + dev->dev_index = id; + + ret = dev->transport->configure_device(dev); + if (ret) + goto out_free_index; + + if (dev->transport->configure_unmap && + dev->transport->configure_unmap(dev)) { + pr_debug("Discard support available, but disabled by default.\n"); + } + + /* + * XXX: there is not much point to have two different values here.. 
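+	 * block_size simply mirrors hw_block_size until userspace
+	 * overrides it via the block_size device attribute.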
+ */ + dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; + dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; + + /* + * Align max_hw_sectors down to PAGE_SIZE I/O transfers + */ + dev->dev_attrib.hw_max_sectors = + se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, + dev->dev_attrib.hw_block_size); + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; + + dev->creation_time = get_jiffies_64(); + + ret = core_setup_alua(dev); + if (ret) + goto out_destroy_device; + + /* + * Setup work_queue for QUEUE_FULL + */ + INIT_WORK(&dev->qf_work_queue, target_qf_do_work); + + scsi_dump_inquiry(dev); + + spin_lock(&hba->device_lock); + hba->dev_count++; + spin_unlock(&hba->device_lock); + + dev->dev_flags |= DF_CONFIGURED; + + return 0; + +out_destroy_device: + dev->transport->destroy_device(dev); +out_free_index: + mutex_lock(&device_mutex); + idr_remove(&devices_idr, dev->dev_index); + mutex_unlock(&device_mutex); +out: + se_release_vpd_for_dev(dev); + return ret; +} + +void target_free_device(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + + WARN_ON(!list_empty(&dev->dev_sep_list)); + + if (target_dev_configured(dev)) { + dev->transport->destroy_device(dev); + + mutex_lock(&device_mutex); + idr_remove(&devices_idr, dev->dev_index); + mutex_unlock(&device_mutex); + + spin_lock(&hba->device_lock); + hba->dev_count--; + spin_unlock(&hba->device_lock); + } + + core_alua_free_lu_gp_mem(dev); + core_alua_set_lba_map(dev, NULL, 0, 0); + core_scsi3_free_all_registrations(dev); + se_release_vpd_for_dev(dev); + + if (dev->transport->free_prot) + dev->transport->free_prot(dev); + + kfree(dev->queues); + dev->transport->free_device(dev); +} + +int core_dev_setup_virtual_lun0(void) +{ + struct se_hba *hba; + struct se_device *dev; + char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1"; + int ret; + + hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); + if (IS_ERR(hba)) + return PTR_ERR(hba); + + dev = target_alloc_device(hba, "virt_lun0"); + if (!dev) { + ret = -ENOMEM; + goto out_free_hba; + } + + hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf)); + + ret = target_configure_device(dev); + if (ret) + goto out_free_se_dev; + + lun0_hba = hba; + g_lun0_dev = dev; + return 0; + +out_free_se_dev: + target_free_device(dev); +out_free_hba: + core_delete_hba(hba); + return ret; +} + + +void core_dev_release_virtual_lun0(void) +{ + struct se_hba *hba = lun0_hba; + + if (!hba) + return; + + if (g_lun0_dev) + target_free_device(g_lun0_dev); + core_delete_hba(hba); +} + +/* + * Common CDB parsing for kernel and user passthrough. + */ +sense_reason_t +passthrough_parse_cdb(struct se_cmd *cmd, + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) +{ + unsigned char *cdb = cmd->t_task_cdb; + struct se_device *dev = cmd->se_dev; + unsigned int size; + + /* + * For REPORT LUNS we always need to emulate the response, for everything + * else, pass it up. + */ + if (cdb[0] == REPORT_LUNS) { + cmd->execute_cmd = spc_emulate_report_luns; + return TCM_NO_SENSE; + } + + /* + * With emulate_pr disabled, all reservation requests should fail, + * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set. 
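+	 * They are rejected with TCM_UNSUPPORTED_SCSI_OPCODE before any
+	 * emulation or passthrough dispatch below can see them.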
+ */ + if (!dev->dev_attrib.emulate_pr && + ((cdb[0] == PERSISTENT_RESERVE_IN) || + (cdb[0] == PERSISTENT_RESERVE_OUT) || + (cdb[0] == RELEASE || cdb[0] == RELEASE_10) || + (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) { + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + /* + * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to + * emulate the response, since tcmu does not have the information + * required to process these commands. + */ + if (!(dev->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_PGR)) { + if (cdb[0] == PERSISTENT_RESERVE_IN) { + cmd->execute_cmd = target_scsi3_emulate_pr_in; + size = get_unaligned_be16(&cdb[7]); + return target_cmd_size_check(cmd, size); + } + if (cdb[0] == PERSISTENT_RESERVE_OUT) { + cmd->execute_cmd = target_scsi3_emulate_pr_out; + size = get_unaligned_be32(&cdb[5]); + return target_cmd_size_check(cmd, size); + } + + if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) { + cmd->execute_cmd = target_scsi2_reservation_release; + if (cdb[0] == RELEASE_10) + size = get_unaligned_be16(&cdb[7]); + else + size = cmd->data_length; + return target_cmd_size_check(cmd, size); + } + if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) { + cmd->execute_cmd = target_scsi2_reservation_reserve; + if (cdb[0] == RESERVE_10) + size = get_unaligned_be16(&cdb[7]); + else + size = cmd->data_length; + return target_cmd_size_check(cmd, size); + } + } + + /* Set DATA_CDB flag for ops that should have it */ + switch (cdb[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY: + case WRITE_VERIFY_12: + case WRITE_VERIFY_16: + case COMPARE_AND_WRITE: + case XDWRITEREAD_10: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + case VARIABLE_LENGTH_CMD: + switch (get_unaligned_be16(&cdb[8])) { + case READ_32: + case WRITE_32: + case WRITE_VERIFY_32: + case XDWRITEREAD_32: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + } + } + + cmd->execute_cmd = exec_cmd; + + return TCM_NO_SENSE; +} +EXPORT_SYMBOL(passthrough_parse_cdb); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c new file mode 100644 index 0000000000..b7c637644c --- /dev/null +++ b/drivers/target/target_core_fabric_configfs.c @@ -0,0 +1,1163 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* +* Filename: target_core_fabric_configfs.c + * + * This file contains generic fabric module configfs infrastructure for + * TCM v4.x code + * + * (c) Copyright 2010-2013 Datera, Inc. + * + * Nicholas A. 
Bellinger <nab@linux-iscsi.org> +* + ****************************************************************************/ + +#include <linux/kstrtox.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/unistd.h> +#include <linux/string.h> +#include <linux/syscalls.h> +#include <linux/configfs.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" + +#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ +static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ +{ \ + struct config_item_type *cit = &tf->tf_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = _attrs; \ + cit->ct_owner = tf->tf_ops->module; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} + +#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \ +static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ +{ \ + struct config_item_type *cit = &tf->tf_##_name##_cit; \ + struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = attrs; \ + cit->ct_owner = tf->tf_ops->module; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} + +static struct configfs_item_operations target_fabric_port_item_ops; + +/* Start of tfc_tpg_mappedlun_cit */ + +static int target_fabric_mappedlun_link( + struct config_item *lun_acl_ci, + struct config_item *lun_ci) +{ + struct se_dev_entry *deve; + struct se_lun *lun; + struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), + struct se_lun_acl, se_lun_group); + struct se_portal_group *se_tpg; + struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; + bool lun_access_ro; + + if (!lun_ci->ci_type || + lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) { + pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci); + return -EFAULT; + } + lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); + + /* + * Ensure that the source port exists + */ + if (!lun->lun_se_dev) { + pr_err("Source se_lun->lun_se_dev does not exist\n"); + return -EINVAL; + } + if (lun->lun_shutdown) { + pr_err("Unable to create mappedlun symlink because" + " lun->lun_shutdown=true\n"); + return -EINVAL; + } + se_tpg = lun->lun_tpg; + + nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; + tpg_ci = &nacl_ci->ci_group->cg_item; + wwn_ci = &tpg_ci->ci_group->cg_item; + tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item; + wwn_ci_s = &tpg_ci_s->ci_group->cg_item; + /* + * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT + */ + if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { + pr_err("Illegal Initiator ACL SymLink outside of %s\n", + config_item_name(wwn_ci)); + return -EINVAL; + } + if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { + pr_err("Illegal Initiator ACL Symlink outside of %s" + " TPGT: %s\n", config_item_name(wwn_ci), + config_item_name(tpg_ci)); + return -EINVAL; + } + /* + * If this struct se_node_acl was dynamically generated with + * tpg_1/attrib/generate_node_acls=1, use the existing + * deve->lun_access_ro value, which 
will be true when + * tpg_1/attrib/demo_mode_write_protect=1 + */ + rcu_read_lock(); + deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun); + if (deve) + lun_access_ro = deve->lun_access_ro; + else + lun_access_ro = + (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( + se_tpg)) ? true : false; + rcu_read_unlock(); + /* + * Determine the actual mapped LUN value user wants.. + * + * This value is what the SCSI Initiator actually sees the + * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports. + */ + return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro); +} + +static void target_fabric_mappedlun_unlink( + struct config_item *lun_acl_ci, + struct config_item *lun_ci) +{ + struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), + struct se_lun_acl, se_lun_group); + struct se_lun *lun = container_of(to_config_group(lun_ci), + struct se_lun, lun_group); + + core_dev_del_initiator_node_lun_acl(lun, lacl); +} + +static struct se_lun_acl *item_to_lun_acl(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_lun_acl, + se_lun_group); +} + +static ssize_t target_fabric_mappedlun_write_protect_show( + struct config_item *item, char *page) +{ + struct se_lun_acl *lacl = item_to_lun_acl(item); + struct se_node_acl *se_nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t len = 0; + + rcu_read_lock(); + deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun); + if (deve) { + len = sprintf(page, "%d\n", deve->lun_access_ro); + } + rcu_read_unlock(); + + return len; +} + +static ssize_t target_fabric_mappedlun_write_protect_store( + struct config_item *item, const char *page, size_t count) +{ + struct se_lun_acl *lacl = item_to_lun_acl(item); + struct se_node_acl *se_nacl = lacl->se_lun_nacl; + struct se_portal_group *se_tpg = se_nacl->se_tpg; + unsigned long wp; + int ret; + + ret = kstrtoul(page, 0, &wp); + if (ret) + return ret; + + if ((wp != 1) && (wp != 0)) + return -EINVAL; + + /* wp=1 means lun_access_ro=true */ + core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl); + + pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" + " Mapped LUN: %llu Write Protect bit to %s\n", + se_tpg->se_tpg_tfo->fabric_name, + se_nacl->initiatorname, lacl->mapped_lun, (wp) ? 
"ON" : "OFF"); + + return count; + +} + +CONFIGFS_ATTR(target_fabric_mappedlun_, write_protect); + +static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { + &target_fabric_mappedlun_attr_write_protect, + NULL, +}; + +static void target_fabric_mappedlun_release(struct config_item *item) +{ + struct se_lun_acl *lacl = container_of(to_config_group(item), + struct se_lun_acl, se_lun_group); + struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; + + core_dev_free_initiator_node_lun_acl(se_tpg, lacl); +} + +static struct configfs_item_operations target_fabric_mappedlun_item_ops = { + .release = target_fabric_mappedlun_release, + .allow_link = target_fabric_mappedlun_link, + .drop_link = target_fabric_mappedlun_unlink, +}; + +TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, + target_fabric_mappedlun_attrs); + +/* End of tfc_tpg_mappedlun_cit */ + +/* Start of tfc_tpg_mappedlun_port_cit */ + +static struct config_group *target_core_mappedlun_stat_mkdir( + struct config_group *group, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static void target_core_mappedlun_stat_rmdir( + struct config_group *group, + struct config_item *item) +{ + return; +} + +static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { + .make_group = target_core_mappedlun_stat_mkdir, + .drop_item = target_core_mappedlun_stat_rmdir, +}; + +TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops, + NULL); + +/* End of tfc_tpg_mappedlun_port_cit */ + +TF_CIT_SETUP_DRV(tpg_nacl_attrib, NULL, NULL); +TF_CIT_SETUP_DRV(tpg_nacl_auth, NULL, NULL); +TF_CIT_SETUP_DRV(tpg_nacl_param, NULL, NULL); + +/* Start of tfc_tpg_nacl_base_cit */ + +static struct config_group *target_fabric_make_mappedlun( + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl = container_of(group, + struct se_node_acl, acl_group); + struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_lun_acl *lacl = NULL; + char *buf; + unsigned long long mapped_lun; + int ret = 0; + + buf = kzalloc(strlen(name) + 1, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate memory for name buf\n"); + return ERR_PTR(-ENOMEM); + } + snprintf(buf, strlen(name) + 1, "%s", name); + /* + * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. + */ + if (strstr(buf, "lun_") != buf) { + pr_err("Unable to locate \"lun_\" from buf: %s" + " name: %s\n", buf, name); + ret = -EINVAL; + goto out; + } + /* + * Determine the Mapped LUN value. This is what the SCSI Initiator + * Port will actually see. 
+ */ + ret = kstrtoull(buf + 4, 0, &mapped_lun); + if (ret) + goto out; + + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, + mapped_lun, &ret); + if (!lacl) { + ret = -EINVAL; + goto out; + } + + config_group_init_type_name(&lacl->se_lun_group, name, + &tf->tf_tpg_mappedlun_cit); + + config_group_init_type_name(&lacl->ml_stat_grps.stat_group, + "statistics", &tf->tf_tpg_mappedlun_stat_cit); + configfs_add_default_group(&lacl->ml_stat_grps.stat_group, + &lacl->se_lun_group); + + target_stat_setup_mappedlun_default_groups(lacl); + + kfree(buf); + return &lacl->se_lun_group; +out: + kfree(lacl); + kfree(buf); + return ERR_PTR(ret); +} + +static void target_fabric_drop_mappedlun( + struct config_group *group, + struct config_item *item) +{ + struct se_lun_acl *lacl = container_of(to_config_group(item), + struct se_lun_acl, se_lun_group); + + configfs_remove_default_groups(&lacl->ml_stat_grps.stat_group); + configfs_remove_default_groups(&lacl->se_lun_group); + + config_item_put(item); +} + +static void target_fabric_nacl_base_release(struct config_item *item) +{ + struct se_node_acl *se_nacl = container_of(to_config_group(item), + struct se_node_acl, acl_group); + + configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group); + core_tpg_del_initiator_node_acl(se_nacl); +} + +static struct configfs_item_operations target_fabric_nacl_base_item_ops = { + .release = target_fabric_nacl_base_release, +}; + +static struct configfs_group_operations target_fabric_nacl_base_group_ops = { + .make_group = target_fabric_make_mappedlun, + .drop_item = target_fabric_drop_mappedlun, +}; + +TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops, + &target_fabric_nacl_base_group_ops); + +/* End of tfc_tpg_nacl_base_cit */ + +/* Start of tfc_node_fabric_stats_cit */ +/* + * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group + * to allow fabrics access to ->acl_fabric_stat_group->default_groups[] + */ +TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL); + +/* End of tfc_wwn_fabric_stats_cit */ + +/* Start of tfc_tpg_nacl_cit */ + +static struct config_group *target_fabric_make_nodeacl( + struct config_group *group, + const char *name) +{ + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_acl_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_node_acl *se_nacl; + + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name); + if (IS_ERR(se_nacl)) + return ERR_CAST(se_nacl); + + config_group_init_type_name(&se_nacl->acl_group, name, + &tf->tf_tpg_nacl_base_cit); + + config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", + &tf->tf_tpg_nacl_attrib_cit); + configfs_add_default_group(&se_nacl->acl_attrib_group, + &se_nacl->acl_group); + + config_group_init_type_name(&se_nacl->acl_auth_group, "auth", + &tf->tf_tpg_nacl_auth_cit); + configfs_add_default_group(&se_nacl->acl_auth_group, + &se_nacl->acl_group); + + config_group_init_type_name(&se_nacl->acl_param_group, "param", + &tf->tf_tpg_nacl_param_cit); + configfs_add_default_group(&se_nacl->acl_param_group, + &se_nacl->acl_group); + + config_group_init_type_name(&se_nacl->acl_fabric_stat_group, + "fabric_statistics", &tf->tf_tpg_nacl_stat_cit); + configfs_add_default_group(&se_nacl->acl_fabric_stat_group, + &se_nacl->acl_group); + + if (tf->tf_ops->fabric_init_nodeacl) { + int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name); + if (ret) { + configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group); + 
core_tpg_del_initiator_node_acl(se_nacl); + return ERR_PTR(ret); + } + } + + return &se_nacl->acl_group; +} + +static void target_fabric_drop_nodeacl( + struct config_group *group, + struct config_item *item) +{ + struct se_node_acl *se_nacl = container_of(to_config_group(item), + struct se_node_acl, acl_group); + + configfs_remove_default_groups(&se_nacl->acl_group); + + /* + * struct se_node_acl free is done in target_fabric_nacl_base_release() + */ + config_item_put(item); +} + +static struct configfs_group_operations target_fabric_nacl_group_ops = { + .make_group = target_fabric_make_nodeacl, + .drop_item = target_fabric_drop_nodeacl, +}; + +TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); + +/* End of tfc_tpg_nacl_cit */ + +/* Start of tfc_tpg_np_base_cit */ + +static void target_fabric_np_base_release(struct config_item *item) +{ + struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), + struct se_tpg_np, tpg_np_group); + struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + + tf->tf_ops->fabric_drop_np(se_tpg_np); +} + +static struct configfs_item_operations target_fabric_np_base_item_ops = { + .release = target_fabric_np_base_release, +}; + +TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL); + +/* End of tfc_tpg_np_base_cit */ + +/* Start of tfc_tpg_np_cit */ + +static struct config_group *target_fabric_make_np( + struct config_group *group, + const char *name) +{ + struct se_portal_group *se_tpg = container_of(group, + struct se_portal_group, tpg_np_group); + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + struct se_tpg_np *se_tpg_np; + + if (!tf->tf_ops->fabric_make_np) { + pr_err("tf->tf_ops.fabric_make_np is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name); + if (!se_tpg_np || IS_ERR(se_tpg_np)) + return ERR_PTR(-EINVAL); + + se_tpg_np->tpg_np_parent = se_tpg; + config_group_init_type_name(&se_tpg_np->tpg_np_group, name, + &tf->tf_tpg_np_base_cit); + + return &se_tpg_np->tpg_np_group; +} + +static void target_fabric_drop_np( + struct config_group *group, + struct config_item *item) +{ + /* + * struct se_tpg_np is released via target_fabric_np_base_release() + */ + config_item_put(item); +} + +static struct configfs_group_operations target_fabric_np_group_ops = { + .make_group = &target_fabric_make_np, + .drop_item = &target_fabric_drop_np, +}; + +TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL); + +/* End of tfc_tpg_np_cit */ + +/* Start of tfc_tpg_port_cit */ + +static struct se_lun *item_to_lun(struct config_item *item) +{ + return container_of(to_config_group(item), struct se_lun, + lun_group); +} + +static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_show_tg_pt_gp_info(lun, page); +} + +static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_store_tg_pt_gp_info(lun, page, count); +} + +static ssize_t target_fabric_port_alua_tg_pt_offline_show( + struct config_item *item, char *page) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_show_offline_bit(lun, page); +} + +static ssize_t 
target_fabric_port_alua_tg_pt_offline_store( + struct config_item *item, const char *page, size_t count) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_store_offline_bit(lun, page, count); +} + +static ssize_t target_fabric_port_alua_tg_pt_status_show( + struct config_item *item, char *page) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_show_secondary_status(lun, page); +} + +static ssize_t target_fabric_port_alua_tg_pt_status_store( + struct config_item *item, const char *page, size_t count) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_store_secondary_status(lun, page, count); +} + +static ssize_t target_fabric_port_alua_tg_pt_write_md_show( + struct config_item *item, char *page) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_show_secondary_write_metadata(lun, page); +} + +static ssize_t target_fabric_port_alua_tg_pt_write_md_store( + struct config_item *item, const char *page, size_t count) +{ + struct se_lun *lun = item_to_lun(item); + + if (!lun->lun_se_dev) + return -ENODEV; + + return core_alua_store_secondary_write_metadata(lun, page, count); +} + +CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_gp); +CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_offline); +CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_status); +CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_write_md); + +static struct configfs_attribute *target_fabric_port_attrs[] = { + &target_fabric_port_attr_alua_tg_pt_gp, + &target_fabric_port_attr_alua_tg_pt_offline, + &target_fabric_port_attr_alua_tg_pt_status, + &target_fabric_port_attr_alua_tg_pt_write_md, + NULL, +}; + +static int target_fabric_port_link( + struct config_item *lun_ci, + struct config_item *se_dev_ci) +{ + struct config_item *tpg_ci; + struct se_lun *lun = container_of(to_config_group(lun_ci), + struct se_lun, lun_group); + struct se_portal_group *se_tpg; + struct se_device *dev; + struct target_fabric_configfs *tf; + int ret; + + if (!se_dev_ci->ci_type || + se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) { + pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci); + return -EFAULT; + } + dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group); + + if (!target_dev_configured(dev)) { + pr_err("se_device not configured yet, cannot port link\n"); + return -ENODEV; + } + + tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; + se_tpg = container_of(to_config_group(tpg_ci), + struct se_portal_group, tpg_group); + tf = se_tpg->se_tpg_wwn->wwn_tf; + + if (lun->lun_se_dev != NULL) { + pr_err("Port Symlink already exists\n"); + return -EEXIST; + } + + ret = core_dev_add_lun(se_tpg, dev, lun); + if (ret) { + pr_err("core_dev_add_lun() failed: %d\n", ret); + goto out; + } + + if (tf->tf_ops->fabric_post_link) { + /* + * Call the optional fabric_post_link() to allow a + * fabric module to setup any additional state once + * core_dev_add_lun() has been called.. 
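+		 *
+		 * A fabric might use this to set up per-LUN resources
+		 * that can only exist once the LUN is visible; it
+		 * mirrors the fabric_pre_unlink() teardown hook.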
+		 */
+		tf->tf_ops->fabric_post_link(se_tpg, lun);
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+static void target_fabric_port_unlink(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops->fabric_pre_unlink) {
+		/*
+		 * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+		 * core_dev_del_lun() is called.
+		 */
+		tf->tf_ops->fabric_pre_unlink(se_tpg, lun);
+	}
+
+	core_dev_del_lun(se_tpg, lun);
+}
+
+static void target_fabric_port_release(struct config_item *item)
+{
+	struct se_lun *lun = container_of(to_config_group(item),
+			struct se_lun, lun_group);
+
+	kfree_rcu(lun, rcu_head);
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+	.release		= target_fabric_port_release,
+	.allow_link		= target_fabric_port_link,
+	.drop_link		= target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_port_stat_cit */
+
+static struct config_group *target_core_port_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_port_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_port_stat_group_ops = {
+	.make_group		= target_core_port_stat_mkdir,
+	.drop_item		= target_core_port_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
+
+/* End of tfc_tpg_port_stat_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_lun_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	unsigned long long unpacked_lun;
+	int errno;
+
+	if (strstr(name, "lun_") != name) {
+		pr_err("Unable to locate \"lun_\" in"
+				" \"lun_$LUN_NUMBER\"\n");
+		return ERR_PTR(-EINVAL);
+	}
+	errno = kstrtoull(name + 4, 0, &unpacked_lun);
+	if (errno)
+		return ERR_PTR(errno);
+
+	lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
+	if (IS_ERR(lun))
+		return ERR_CAST(lun);
+
+	config_group_init_type_name(&lun->lun_group, name,
+			&tf->tf_tpg_port_cit);
+
+	config_group_init_type_name(&lun->port_stat_grps.stat_group,
+			"statistics", &tf->tf_tpg_port_stat_cit);
+	configfs_add_default_group(&lun->port_stat_grps.stat_group,
+			&lun->lun_group);
+
+	target_stat_setup_port_default_groups(lun);
+
+	return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun *lun = container_of(to_config_group(item),
+			struct se_lun, lun_group);
+
+	configfs_remove_default_groups(&lun->port_stat_grps.stat_group);
+	configfs_remove_default_groups(&lun->lun_group);
+
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+	.make_group	= &target_fabric_make_lun,
+	.drop_item	= &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+TF_CIT_SETUP_DRV(tpg_attrib, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_auth, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_param,
NULL, NULL); + +/* Start of tfc_tpg_base_cit */ + +static void target_fabric_tpg_release(struct config_item *item) +{ + struct se_portal_group *se_tpg = container_of(to_config_group(item), + struct se_portal_group, tpg_group); + struct se_wwn *wwn = se_tpg->se_tpg_wwn; + struct target_fabric_configfs *tf = wwn->wwn_tf; + + tf->tf_ops->fabric_drop_tpg(se_tpg); +} + +static struct configfs_item_operations target_fabric_tpg_base_item_ops = { + .release = target_fabric_tpg_release, +}; + +static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item, + char *page) +{ + return sysfs_emit(page, "%d\n", to_tpg(item)->enabled); +} + +static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item, + const char *page, + size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + int ret; + bool op; + + ret = kstrtobool(page, &op); + if (ret) + return ret; + + if (se_tpg->enabled == op) + return count; + if (op) + ret = target_tpg_enable(se_tpg); + else + ret = target_tpg_disable(se_tpg); + if (ret) + return ret; + return count; +} +static ssize_t target_fabric_tpg_base_rtpi_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + + return sysfs_emit(page, "%#x\n", se_tpg->tpg_rtpi); +} + +static ssize_t target_fabric_tpg_base_rtpi_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + u16 val; + int ret; + + ret = kstrtou16(page, 0, &val); + if (ret < 0) + return ret; + if (val == 0) + return -EINVAL; + + if (se_tpg->enabled) { + pr_info("%s_TPG[%hu] - Can not change RTPI on enabled TPG", + se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); + return -EINVAL; + } + + se_tpg->tpg_rtpi = val; + se_tpg->rtpi_manual = true; + + return count; +} + +CONFIGFS_ATTR(target_fabric_tpg_base_, enable); +CONFIGFS_ATTR(target_fabric_tpg_base_, rtpi); + +static int +target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf) +{ + struct config_item_type *cit = &tf->tf_tpg_base_cit; + struct configfs_attribute **attrs = NULL; + size_t nr_attrs = 0; + int i = 0; + + if (tf->tf_ops->tfc_tpg_base_attrs) + while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL) + nr_attrs++; + + if (tf->tf_ops->fabric_enable_tpg) + nr_attrs++; + + /* + 1 for target_fabric_tpg_base_attr_rtpi */ + nr_attrs++; + + /* + 1 for final NULL in the array */ + attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + if (tf->tf_ops->tfc_tpg_base_attrs) + for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++) + attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i]; + + if (tf->tf_ops->fabric_enable_tpg) + attrs[i++] = &target_fabric_tpg_base_attr_enable; + + attrs[i++] = &target_fabric_tpg_base_attr_rtpi; + + cit->ct_item_ops = &target_fabric_tpg_base_item_ops; + cit->ct_attrs = attrs; + cit->ct_owner = tf->tf_ops->module; + pr_debug("Setup generic tpg_base\n"); + + return 0; +} +/* End of tfc_tpg_base_cit */ + +/* Start of tfc_tpg_cit */ + +static struct config_group *target_fabric_make_tpg( + struct config_group *group, + const char *name) +{ + struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); + struct target_fabric_configfs *tf = wwn->wwn_tf; + struct se_portal_group *se_tpg; + + if (!tf->tf_ops->fabric_make_tpg) { + pr_err("tf->tf_ops->fabric_make_tpg is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name); + if (!se_tpg || IS_ERR(se_tpg)) + return ERR_PTR(-EINVAL); + + 
config_group_init_type_name(&se_tpg->tpg_group, name, + &tf->tf_tpg_base_cit); + + config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", + &tf->tf_tpg_lun_cit); + configfs_add_default_group(&se_tpg->tpg_lun_group, + &se_tpg->tpg_group); + + config_group_init_type_name(&se_tpg->tpg_np_group, "np", + &tf->tf_tpg_np_cit); + configfs_add_default_group(&se_tpg->tpg_np_group, + &se_tpg->tpg_group); + + config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", + &tf->tf_tpg_nacl_cit); + configfs_add_default_group(&se_tpg->tpg_acl_group, + &se_tpg->tpg_group); + + config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", + &tf->tf_tpg_attrib_cit); + configfs_add_default_group(&se_tpg->tpg_attrib_group, + &se_tpg->tpg_group); + + config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", + &tf->tf_tpg_auth_cit); + configfs_add_default_group(&se_tpg->tpg_auth_group, + &se_tpg->tpg_group); + + config_group_init_type_name(&se_tpg->tpg_param_group, "param", + &tf->tf_tpg_param_cit); + configfs_add_default_group(&se_tpg->tpg_param_group, + &se_tpg->tpg_group); + + return &se_tpg->tpg_group; +} + +static void target_fabric_drop_tpg( + struct config_group *group, + struct config_item *item) +{ + struct se_portal_group *se_tpg = container_of(to_config_group(item), + struct se_portal_group, tpg_group); + + configfs_remove_default_groups(&se_tpg->tpg_group); + config_item_put(item); +} + +static void target_fabric_release_wwn(struct config_item *item) +{ + struct se_wwn *wwn = container_of(to_config_group(item), + struct se_wwn, wwn_group); + struct target_fabric_configfs *tf = wwn->wwn_tf; + + configfs_remove_default_groups(&wwn->fabric_stat_group); + configfs_remove_default_groups(&wwn->param_group); + tf->tf_ops->fabric_drop_wwn(wwn); +} + +static struct configfs_item_operations target_fabric_tpg_item_ops = { + .release = target_fabric_release_wwn, +}; + +static struct configfs_group_operations target_fabric_tpg_group_ops = { + .make_group = target_fabric_make_tpg, + .drop_item = target_fabric_drop_tpg, +}; + +TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, + NULL); + +/* End of tfc_tpg_cit */ + +/* Start of tfc_wwn_fabric_stats_cit */ +/* + * This is used as a placeholder for struct se_wwn->fabric_stat_group + * to allow fabrics access to ->fabric_stat_group->default_groups[] + */ +TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); + +/* End of tfc_wwn_fabric_stats_cit */ + +static ssize_t +target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item, + char *page) +{ + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, + param_group); + return sprintf(page, "%d\n", + wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ? 
+ SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity); +} + +static ssize_t +target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, + param_group); + int compl_val; + + if (kstrtoint(page, 0, &compl_val)) + return -EINVAL; + + switch (compl_val) { + case SE_COMPL_AFFINITY_CPUID: + wwn->cmd_compl_affinity = compl_val; + break; + case SE_COMPL_AFFINITY_CURR_CPU: + wwn->cmd_compl_affinity = WORK_CPU_UNBOUND; + break; + default: + if (compl_val < 0 || compl_val >= nr_cpu_ids || + !cpu_online(compl_val)) { + pr_err("Command completion value must be between %d and %d or an online CPU.\n", + SE_COMPL_AFFINITY_CPUID, + SE_COMPL_AFFINITY_CURR_CPU); + return -EINVAL; + } + wwn->cmd_compl_affinity = compl_val; + } + + return count; +} +CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity); + +static struct configfs_attribute *target_fabric_wwn_param_attrs[] = { + &target_fabric_wwn_attr_cmd_completion_affinity, + NULL, +}; + +TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs); + +/* Start of tfc_wwn_cit */ + +static struct config_group *target_fabric_make_wwn( + struct config_group *group, + const char *name) +{ + struct target_fabric_configfs *tf = container_of(group, + struct target_fabric_configfs, tf_group); + struct se_wwn *wwn; + + if (!tf->tf_ops->fabric_make_wwn) { + pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); + return ERR_PTR(-ENOSYS); + } + + wwn = tf->tf_ops->fabric_make_wwn(tf, group, name); + if (!wwn || IS_ERR(wwn)) + return ERR_PTR(-EINVAL); + + wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID; + wwn->wwn_tf = tf; + + config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit); + + config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", + &tf->tf_wwn_fabric_stats_cit); + configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group); + + config_group_init_type_name(&wwn->param_group, "param", + &tf->tf_wwn_param_cit); + configfs_add_default_group(&wwn->param_group, &wwn->wwn_group); + + if (tf->tf_ops->add_wwn_groups) + tf->tf_ops->add_wwn_groups(wwn); + return &wwn->wwn_group; +} + +static void target_fabric_drop_wwn( + struct config_group *group, + struct config_item *item) +{ + struct se_wwn *wwn = container_of(to_config_group(item), + struct se_wwn, wwn_group); + + configfs_remove_default_groups(&wwn->wwn_group); + config_item_put(item); +} + +static struct configfs_group_operations target_fabric_wwn_group_ops = { + .make_group = target_fabric_make_wwn, + .drop_item = target_fabric_drop_wwn, +}; + +TF_CIT_SETUP_DRV(wwn, NULL, &target_fabric_wwn_group_ops); +TF_CIT_SETUP_DRV(discovery, NULL, NULL); + +int target_fabric_setup_cits(struct target_fabric_configfs *tf) +{ + int ret; + + target_fabric_setup_discovery_cit(tf); + target_fabric_setup_wwn_cit(tf); + target_fabric_setup_wwn_fabric_stats_cit(tf); + target_fabric_setup_wwn_param_cit(tf); + target_fabric_setup_tpg_cit(tf); + + ret = target_fabric_setup_tpg_base_cit(tf); + if (ret) + return ret; + + target_fabric_setup_tpg_port_cit(tf); + target_fabric_setup_tpg_port_stat_cit(tf); + target_fabric_setup_tpg_lun_cit(tf); + target_fabric_setup_tpg_np_cit(tf); + target_fabric_setup_tpg_np_base_cit(tf); + target_fabric_setup_tpg_attrib_cit(tf); + target_fabric_setup_tpg_auth_cit(tf); + target_fabric_setup_tpg_param_cit(tf); + target_fabric_setup_tpg_nacl_cit(tf); + target_fabric_setup_tpg_nacl_base_cit(tf); + 
target_fabric_setup_tpg_nacl_attrib_cit(tf); + target_fabric_setup_tpg_nacl_auth_cit(tf); + target_fabric_setup_tpg_nacl_param_cit(tf); + target_fabric_setup_tpg_nacl_stat_cit(tf); + target_fabric_setup_tpg_mappedlun_cit(tf); + target_fabric_setup_tpg_mappedlun_stat_cit(tf); + + return 0; +} diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c new file mode 100644 index 0000000000..6600ae44f2 --- /dev/null +++ b/drivers/target/target_core_fabric_lib.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_fabric_lib.c + * + * This file contains generic high level protocol identifier and PR + * handlers for TCM fabric modules + * + * (c) Copyright 2010-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +/* + * See SPC4, section 7.5 "Protocol specific parameters" for details + * on the formats implemented in this file. + */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/spinlock.h> +#include <linux/export.h> +#include <asm/unaligned.h> + +#include <scsi/scsi_proto.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_pr.h" + + +static int sas_get_pr_transport_id( + struct se_node_acl *nacl, + int *format_code, + unsigned char *buf) +{ + int ret; + + /* Skip over 'naa. prefix */ + ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8); + if (ret) { + pr_debug("%s: invalid hex string\n", __func__); + return ret; + } + + return 24; +} + +static int fc_get_pr_transport_id( + struct se_node_acl *se_nacl, + int *format_code, + unsigned char *buf) +{ + unsigned char *ptr; + int i, ret; + u32 off = 8; + + /* + * We convert the ASCII formatted N Port name into a binary + * encoded TransportID. + */ + ptr = &se_nacl->initiatorname[0]; + for (i = 0; i < 23; ) { + if (!strncmp(&ptr[i], ":", 1)) { + i++; + continue; + } + ret = hex2bin(&buf[off++], &ptr[i], 1); + if (ret < 0) { + pr_debug("%s: invalid hex string\n", __func__); + return ret; + } + i += 2; + } + /* + * The FC Transport ID is a hardcoded 24-byte length + */ + return 24; +} + +static int sbp_get_pr_transport_id( + struct se_node_acl *nacl, + int *format_code, + unsigned char *buf) +{ + int ret; + + ret = hex2bin(&buf[8], nacl->initiatorname, 8); + if (ret) { + pr_debug("%s: invalid hex string\n", __func__); + return ret; + } + + return 24; +} + +static int srp_get_pr_transport_id( + struct se_node_acl *nacl, + int *format_code, + unsigned char *buf) +{ + const char *p; + unsigned len, count, leading_zero_bytes; + int rc; + + p = nacl->initiatorname; + if (strncasecmp(p, "0x", 2) == 0) + p += 2; + len = strlen(p); + if (len % 2) + return -EINVAL; + + count = min(len / 2, 16U); + leading_zero_bytes = 16 - count; + memset(buf + 8, 0, leading_zero_bytes); + rc = hex2bin(buf + 8 + leading_zero_bytes, p, count); + if (rc < 0) { + pr_debug("hex2bin failed for %s: %d\n", p, rc); + return rc; + } + + return 24; +} + +static int iscsi_get_pr_transport_id( + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + u32 off = 4, padding = 0; + int isid_len; + u16 len = 0; + + spin_lock_irq(&se_nacl->nacl_sess_lock); + /* + * Only null terminate the last field. 
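+	 * (Layout sketch, following the code below: byte 0 carries the
+	 * FORMAT CODE in bits 7:6 and the protocol identifier in bits 3:0,
+	 * bytes 2-3 carry the big-endian ADDITIONAL LENGTH, and the iSCSI
+	 * name starts at byte 4.)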
+ * + * From spc4r37 section 7.6.4.6: TransportID for initiator ports using + * SCSI over iSCSI. + * + * Table 507 TPID=0 Initiator device TransportID + * + * The null-terminated, null-padded (see 4.3.2) ISCSI NAME field shall + * contain the iSCSI name of an iSCSI initiator node (see RFC 7143). + * The first ISCSI NAME field byte containing an ASCII null character + * terminates the ISCSI NAME field without regard for the specified + * length of the iSCSI TransportID or the contents of the ADDITIONAL + * LENGTH field. + */ + len = sprintf(&buf[off], "%s", se_nacl->initiatorname); + off += len; + if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { + /* + * Set FORMAT CODE 01b for iSCSI Initiator port TransportID + * format. + */ + buf[0] |= 0x40; + /* + * From spc4r37 Section 7.6.4.6 + * + * Table 508 TPID=1 Initiator port TransportID. + * + * The ISCSI NAME field shall not be null-terminated + * (see 4.3.2) and shall not be padded. + * + * The SEPARATOR field shall contain the five ASCII + * characters ",i,0x". + * + * The null-terminated, null-padded ISCSI INITIATOR SESSION ID + * field shall contain the iSCSI initiator session identifier + * (see RFC 3720) in the form of ASCII characters that are the + * hexadecimal digits converted from the binary iSCSI initiator + * session identifier value. The first ISCSI INITIATOR SESSION + * ID field byte containing an ASCII null character terminates + * the ISCSI INITIATOR SESSION ID field without regard for the + * specified length of the iSCSI TransportID or the contents + * of the ADDITIONAL LENGTH field. + */ + buf[off++] = 0x2c; /* ASCII Character: "," */ + buf[off++] = 0x69; /* ASCII Character: "i" */ + buf[off++] = 0x2c; /* ASCII Character: "," */ + buf[off++] = 0x30; /* ASCII Character: "0" */ + buf[off++] = 0x78; /* ASCII Character: "x" */ + len += 5; + + isid_len = sprintf(buf + off, "%s", pr_reg->pr_reg_isid); + off += isid_len; + len += isid_len; + } + buf[off] = '\0'; + len += 1; + spin_unlock_irq(&se_nacl->nacl_sess_lock); + /* + * The ADDITIONAL LENGTH field specifies the number of bytes that follow + * in the TransportID. The additional length shall be at least 20 and + * shall be a multiple of four. + */ + padding = ((-len) & 3); + if (padding != 0) + len += padding; + + put_unaligned_be16(len, &buf[2]); + /* + * Increment value for total payload + header length for + * full status descriptor + */ + len += 4; + + return len; +} + +static int iscsi_get_pr_transport_id_len( + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + u32 len = 0, padding = 0; + + spin_lock_irq(&se_nacl->nacl_sess_lock); + len = strlen(se_nacl->initiatorname); + /* + * Add extra byte for NULL terminator + */ + len++; + /* + * If there is ISID present with the registration, use format code: + * 01b: iSCSI Initiator port TransportID format + * + * If there is not an active iSCSI session, use format code: + * 00b: iSCSI Initiator device TransportID format + */ + if (pr_reg->isid_present_at_reg) { + len += 5; /* For ",i,0x" ASCII separator */ + len += strlen(pr_reg->pr_reg_isid); + *format_code = 1; + } else + *format_code = 0; + spin_unlock_irq(&se_nacl->nacl_sess_lock); + /* + * The ADDITIONAL LENGTH field specifies the number of bytes that follow + * in the TransportID. The additional length shall be at least 20 and + * shall be a multiple of four. 
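+	 *
+	 * Worked example: a 20-character iSCSI name gives len = 21 after
+	 * the NULL terminator, so padding = ((-21) & 3) = 3 and the padded
+	 * length becomes 24, a multiple of four.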
+ */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+
+static char *iscsi_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	char *p;
+	int i;
+	u8 format_code = (buf[0] & 0xc0);
+	/*
+	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+	 *
+	 * TransportID for initiator ports using SCSI over iSCSI,
+	 * from Table 388 -- iSCSI TransportID formats.
+	 *
+	 * 00b     Initiator port is identified using the world wide unique
+	 *         SCSI device name of the iSCSI initiator
+	 *         device containing the initiator port (see table 389).
+	 * 01b     Initiator port is identified using the world wide unique
+	 *         initiator port identifier (see table 390).
+	 * 10b to 11b
+	 *         Reserved
+	 */
+	if ((format_code != 0x00) && (format_code != 0x40)) {
+		pr_err("Illegal format code: 0x%02x for iSCSI"
+			" Initiator Transport ID\n", format_code);
+		return NULL;
+	}
+	/*
+	 * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI Transport ID now.
+	 */
+	if (out_tid_len) {
+		/* The shift works thanks to integer promotion rules */
+		*out_tid_len = get_unaligned_be16(&buf[2]);
+		/* Add four bytes for iSCSI Transport ID header */
+		*out_tid_len += 4;
+	}
+
+	/*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+	 * format.
+	 */
+	if (format_code == 0x40) {
+		p = strstr(&buf[4], ",i,0x");
+		if (!p) {
+			pr_err("Unable to locate \",i,0x\" separator"
+				" for Initiator port identifier: %s\n",
+				&buf[4]);
+			return NULL;
+		}
+		*p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+		*port_nexus_ptr = p;
+		/*
+		 * Go ahead and do the lower case conversion of the received
+		 * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+		 * iscsi_target.c:lio_sess_get_initiator_sid()
+		 */
+		for (i = 0; i < 12; i++) {
+			/*
+			 * The first ISCSI INITIATOR SESSION ID field byte
+			 * containing an ASCII null character terminates the
+			 * ISCSI INITIATOR SESSION ID field without regard for
+			 * the specified length of the iSCSI TransportID or the
+			 * contents of the ADDITIONAL LENGTH field.
+			 */
+			if (*p == '\0')
+				break;
+
+			if (isdigit(*p)) {
+				p++;
+				continue;
+			}
+			*p = tolower(*p);
+			p++;
+		}
+	} else
+		*port_nexus_ptr = NULL;
+
+	return &buf[4];
+}
+
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+		struct t10_pr_registration *pr_reg, int *format_code)
+{
+	switch (nacl->se_tpg->proto_id) {
+	case SCSI_PROTOCOL_FCP:
+	case SCSI_PROTOCOL_SBP:
+	case SCSI_PROTOCOL_SRP:
+	case SCSI_PROTOCOL_SAS:
+		break;
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code);
+	default:
+		pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Most transports use a fixed length 24 byte identifier.
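+	 * For example, sas_get_pr_transport_id() above writes the 8-byte
+	 * NAA identifier at offset 4 and returns the fixed 24-byte length.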
+ */ + *format_code = 0; + return 24; +} + +int target_get_pr_transport_id(struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, int *format_code, + unsigned char *buf) +{ + switch (nacl->se_tpg->proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id(nacl, format_code, buf); + case SCSI_PROTOCOL_SBP: + return sbp_get_pr_transport_id(nacl, format_code, buf); + case SCSI_PROTOCOL_SRP: + return srp_get_pr_transport_id(nacl, format_code, buf); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id(nacl, format_code, buf); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id(nacl, pr_reg, format_code, + buf); + default: + pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id); + return -EINVAL; + } +} + +const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, + char *buf, u32 *out_tid_len, char **port_nexus_ptr) +{ + u32 offset; + + switch (tpg->proto_id) { + case SCSI_PROTOCOL_SAS: + /* + * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID + * for initiator ports using SCSI over SAS Serial SCSI Protocol. + */ + offset = 4; + break; + case SCSI_PROTOCOL_SBP: + case SCSI_PROTOCOL_SRP: + case SCSI_PROTOCOL_FCP: + offset = 8; + break; + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len, + port_nexus_ptr); + default: + pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id); + return NULL; + } + + *port_nexus_ptr = NULL; + *out_tid_len = 24; + return buf + offset; +} diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c new file mode 100644 index 0000000000..4e4cf6c34a --- /dev/null +++ b/drivers/target/target_core_file.c @@ -0,0 +1,954 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_file.c + * + * This file contains the Storage Engine <-> FILEIO transport specific functions + * + * (c) Copyright 2005-2013 Datera, Inc. + * + * Nicholas A. 
Bellinger <nab@kernel.org>
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/falloc.h>
+#include <linux/uio.h>
+#include <linux/scatterlist.h>
+#include <scsi/scsi_proto.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_file.h"
+
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct fd_dev, dev);
+}
+
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct fd_host *fd_host;
+
+	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+	if (!fd_host) {
+		pr_err("Unable to allocate memory for struct fd_host\n");
+		return -ENOMEM;
+	}
+
+	fd_host->fd_host_id = host_id;
+
+	hba->hba_ptr = fd_host;
+
+	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+		TARGET_CORE_VERSION);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+		hba->hba_id, fd_host->fd_host_id);
+
+	return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+	kfree(fd_host);
+	hba->hba_ptr = NULL;
+}
+
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct fd_dev *fd_dev;
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+	if (!fd_dev) {
+		pr_err("Unable to allocate memory for struct fd_dev\n");
+		return NULL;
+	}
+
+	fd_dev->fd_host = fd_host;
+
+	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
+
+	return &fd_dev->dev;
+}
+
+static bool fd_configure_unmap(struct se_device *dev)
+{
+	struct file *file = FD_DEV(dev)->fd_file;
+	struct inode *inode = file->f_mapping->host;
+
+	if (S_ISBLK(inode->i_mode))
+		return target_configure_unmap_from_queue(&dev->dev_attrib,
+							 I_BDEV(inode));
+
+	/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
+	dev->dev_attrib.max_unmap_lba_count = 0x2000;
+	/* Currently hardcoded to 1 in Linux/SCSI code. */
+	dev->dev_attrib.max_unmap_block_desc_count = 1;
+	dev->dev_attrib.unmap_granularity = 1;
+	dev->dev_attrib.unmap_granularity_alignment = 0;
+	return true;
+}
+
+static int fd_configure_device(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct fd_host *fd_host = dev->se_hba->hba_ptr;
+	struct file *file;
+	struct inode *inode = NULL;
+	int flags, ret = -EINVAL;
+
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		pr_err("Missing fd_dev_name=\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+	 * of pure timestamp updates.
+	 */
+	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
+	/*
+	 * Optionally allow fd_buffered_io=1 to be enabled for people
+	 * who want to use the fs buffer cache as a WriteCache mechanism.
+	 *
+	 * This means that in the event of a hard failure, there is a risk
+	 * of silent data-loss if the SCSI client has *not* performed a
+	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
+	 * to write-out the entire device cache.
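+	 *
+	 * Illustrative configfs usage (backstore name and file path are
+	 * made up; the tokens match fd_set_configfs_dev_params() below):
+	 *
+	 *   echo "fd_dev_name=/srv/file.img,fd_dev_size=4194304,fd_buffered_io=1" \
+	 *       > /sys/kernel/config/target/core/fileio_0/dev0/control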
+ */ + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { + pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n"); + flags &= ~O_DSYNC; + } + + file = filp_open(fd_dev->fd_dev_name, flags, 0600); + if (IS_ERR(file)) { + pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name); + ret = PTR_ERR(file); + goto fail; + } + fd_dev->fd_file = file; + /* + * If using a block backend with this struct file, we extract + * fd_dev->fd_[block,dev]_size from struct block_device. + * + * Otherwise, we use the passed fd_size= from configfs + */ + inode = file->f_mapping->host; + if (S_ISBLK(inode->i_mode)) { + struct block_device *bdev = I_BDEV(inode); + unsigned long long dev_size; + + fd_dev->fd_block_size = bdev_logical_block_size(bdev); + /* + * Determine the number of bytes from i_size_read() minus + * one (1) logical sector from underlying struct block_device + */ + dev_size = (i_size_read(file->f_mapping->host) - + fd_dev->fd_block_size); + + pr_debug("FILEIO: Using size: %llu bytes from struct" + " block_device blocks: %llu logical_block_size: %d\n", + dev_size, div_u64(dev_size, fd_dev->fd_block_size), + fd_dev->fd_block_size); + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. + */ + dev->dev_attrib.max_write_same_len = 0xFFFF; + + if (bdev_nonrot(bdev)) + dev->dev_attrib.is_nonrot = 1; + } else { + if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { + pr_err("FILEIO: Missing fd_dev_size=" + " parameter, and no backing struct" + " block_device\n"); + goto fail; + } + + fd_dev->fd_block_size = FD_BLOCKSIZE; + + /* + * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB) + * based upon struct iovec limit for vfs_writev() + */ + dev->dev_attrib.max_write_same_len = 0x1000; + } + + dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; + dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; + dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; + + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { + pr_debug("FILEIO: Forcing setting of emulate_write_cache=1" + " with FDBD_HAS_BUFFERED_IO_WCE\n"); + dev->dev_attrib.emulate_write_cache = 1; + } + + fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; + fd_dev->fd_queue_depth = dev->queue_depth; + + pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," + " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, + fd_dev->fd_dev_name, fd_dev->fd_dev_size); + + return 0; +fail: + if (fd_dev->fd_file) { + filp_close(fd_dev->fd_file, NULL); + fd_dev->fd_file = NULL; + } + return ret; +} + +static void fd_dev_call_rcu(struct rcu_head *p) +{ + struct se_device *dev = container_of(p, struct se_device, rcu_head); + struct fd_dev *fd_dev = FD_DEV(dev); + + kfree(fd_dev); +} + +static void fd_free_device(struct se_device *dev) +{ + call_rcu(&dev->rcu_head, fd_dev_call_rcu); +} + +static void fd_destroy_device(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + + if (fd_dev->fd_file) { + filp_close(fd_dev->fd_file, NULL); + fd_dev->fd_file = NULL; + } +} + +struct target_core_file_cmd { + unsigned long len; + struct se_cmd *cmd; + struct kiocb iocb; + struct bio_vec bvecs[]; +}; + +static void cmd_rw_aio_complete(struct kiocb *iocb, long ret) +{ + struct target_core_file_cmd *cmd; + + cmd = container_of(iocb, struct target_core_file_cmd, iocb); + + if (ret != cmd->len) + target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION); + else + target_complete_cmd(cmd->cmd, SAM_STAT_GOOD); + + kfree(cmd); +} + +static 
sense_reason_t
+fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+		  enum dma_data_direction data_direction)
+{
+	int is_write = !(data_direction == DMA_FROM_DEVICE);
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct file *file = fd_dev->fd_file;
+	struct target_core_file_cmd *aio_cmd;
+	struct iov_iter iter;
+	struct scatterlist *sg;
+	ssize_t len = 0;
+	int ret = 0, i;
+
+	aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
+	if (!aio_cmd)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	for_each_sg(sgl, sg, sgl_nents, i) {
+		bvec_set_page(&aio_cmd->bvecs[i], sg_page(sg), sg->length,
+			      sg->offset);
+		len += sg->length;
+	}
+
+	iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);
+
+	aio_cmd->cmd = cmd;
+	aio_cmd->len = len;
+	aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
+	aio_cmd->iocb.ki_filp = file;
+	aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
+	aio_cmd->iocb.ki_flags = IOCB_DIRECT;
+
+	if (is_write && (cmd->se_cmd_flags & SCF_FUA))
+		aio_cmd->iocb.ki_flags |= IOCB_DSYNC;
+
+	if (is_write)
+		ret = call_write_iter(file, &aio_cmd->iocb, &iter);
+	else
+		ret = call_read_iter(file, &aio_cmd->iocb, &iter);
+
+	if (ret != -EIOCBQUEUED)
+		cmd_rw_aio_complete(&aio_cmd->iocb, ret);
+
+	return 0;
+}
+
+static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+		    u32 block_size, struct scatterlist *sgl,
+		    u32 sgl_nents, u32 data_length, int is_write)
+{
+	struct scatterlist *sg;
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	ssize_t len = 0;
+	loff_t pos = (cmd->t_task_lba * block_size);
+	int ret = 0, i;
+
+	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+	if (!bvec) {
+		pr_err("Unable to allocate fd_do_readv iov[]\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(sgl, sg, sgl_nents, i) {
+		bvec_set_page(&bvec[i], sg_page(sg), sg->length, sg->offset);
+		len += sg->length;
+	}
+
+	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+	if (is_write) {
+		file_start_write(fd);
+		ret = vfs_iter_write(fd, &iter, &pos, 0);
+		file_end_write(fd);
+	} else {
+		ret = vfs_iter_read(fd, &iter, &pos, 0);
+	}
+	if (is_write) {
+		if (ret < 0 || ret != data_length) {
+			pr_err("%s() write returned %d\n", __func__, ret);
+			if (ret >= 0)
+				ret = -EINVAL;
+		}
+	} else {
+		/*
+		 * Return zeros and GOOD status even if the READ did not return
+		 * the expected virt_size for struct file w/o a backing struct
+		 * block_device.
+		 */
+		if (S_ISBLK(file_inode(fd)->i_mode)) {
+			if (ret < 0 || ret != data_length) {
+				pr_err("%s() returned %d, expecting %u for "
+						"S_ISBLK\n", __func__, ret,
+						data_length);
+				if (ret >= 0)
+					ret = -EINVAL;
+			}
+		} else {
+			if (ret < 0) {
+				pr_err("%s() returned %d for non S_ISBLK\n",
+						__func__, ret);
+			} else if (ret != data_length) {
+				/*
+				 * Short read case:
+				 * Probably someone truncated the file under us.
+				 * We must explicitly zero sg-pages to prevent
+				 * exposing uninitialized pages to userspace.
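+				 * For example, a 4096-byte READ that comes
+				 * back with only 512 bytes leaves 3584
+				 * bytes to be cleared by the
+				 * iov_iter_zero() call below.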
+ */ + if (ret < data_length) + ret += iov_iter_zero(data_length - ret, &iter); + else + ret = -EINVAL; + } + } + } + kfree(bvec); + return ret; +} + +static sense_reason_t +fd_execute_sync_cache(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct fd_dev *fd_dev = FD_DEV(dev); + int immed = (cmd->t_task_cdb[1] & 0x2); + loff_t start, end; + int ret; + + /* + * If the Immediate bit is set, queue up the GOOD response + * for this SYNCHRONIZE_CACHE op + */ + if (immed) + target_complete_cmd(cmd, SAM_STAT_GOOD); + + /* + * Determine if we will be flushing the entire device. + */ + if (cmd->t_task_lba == 0 && cmd->data_length == 0) { + start = 0; + end = LLONG_MAX; + } else { + start = cmd->t_task_lba * dev->dev_attrib.block_size; + if (cmd->data_length) + end = start + cmd->data_length - 1; + else + end = LLONG_MAX; + } + + ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); + if (ret != 0) + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); + + if (immed) + return 0; + + if (ret) + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); + else + target_complete_cmd(cmd, SAM_STAT_GOOD); + + return 0; +} + +static sense_reason_t +fd_execute_write_same(struct se_cmd *cmd) +{ + struct se_device *se_dev = cmd->se_dev; + struct fd_dev *fd_dev = FD_DEV(se_dev); + loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; + sector_t nolb = sbc_get_write_same_sectors(cmd); + struct iov_iter iter; + struct bio_vec *bvec; + unsigned int len = 0, i; + ssize_t ret; + + if (cmd->prot_op) { + pr_err("WRITE_SAME: Protection information with FILEIO" + " backends not supported\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + if (!cmd->t_data_nents) + return TCM_INVALID_CDB_FIELD; + + if (cmd->t_data_nents > 1 || + cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) { + pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" + " block_size: %u\n", + cmd->t_data_nents, + cmd->t_data_sg[0].length, + cmd->se_dev->dev_attrib.block_size); + return TCM_INVALID_CDB_FIELD; + } + + bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL); + if (!bvec) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + for (i = 0; i < nolb; i++) { + bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]), + cmd->t_data_sg[0].length, + cmd->t_data_sg[0].offset); + len += se_dev->dev_attrib.block_size; + } + + iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len); + file_start_write(fd_dev->fd_file); + ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); + file_end_write(fd_dev->fd_file); + + kfree(bvec); + if (ret < 0 || ret != len) { + pr_err("vfs_iter_write() returned %zd for write same\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static int +fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, + void *buf, size_t bufsize) +{ + struct fd_dev *fd_dev = FD_DEV(se_dev); + struct file *prot_fd = fd_dev->fd_prot_file; + sector_t prot_length, prot; + loff_t pos = lba * se_dev->prot_length; + + if (!prot_fd) { + pr_err("Unable to locate fd_dev->fd_prot_file\n"); + return -ENODEV; + } + + prot_length = nolb * se_dev->prot_length; + + memset(buf, 0xff, bufsize); + for (prot = 0; prot < prot_length;) { + sector_t len = min_t(sector_t, bufsize, prot_length - prot); + ssize_t ret = kernel_write(prot_fd, buf, len, &pos); + + if (ret != len) { + pr_err("vfs_write to prot file failed: %zd\n", ret); + return ret < 0 ? 
ret : -ENODEV; + } + prot += ret; + } + + return 0; +} + +static int +fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) +{ + void *buf; + int rc; + + buf = (void *)__get_free_page(GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate FILEIO prot buf\n"); + return -ENOMEM; + } + + rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); + + free_page((unsigned long)buf); + + return rc; +} + +static sense_reason_t +fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) +{ + struct file *file = FD_DEV(cmd->se_dev)->fd_file; + struct inode *inode = file->f_mapping->host; + int ret; + + if (!nolb) { + return 0; + } + + if (cmd->se_dev->dev_attrib.pi_prot_type) { + ret = fd_do_prot_unmap(cmd, lba, nolb); + if (ret) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + if (S_ISBLK(inode->i_mode)) { + /* The backend is block device, use discard */ + struct block_device *bdev = I_BDEV(inode); + struct se_device *dev = cmd->se_dev; + + ret = blkdev_issue_discard(bdev, + target_to_linux_sector(dev, lba), + target_to_linux_sector(dev, nolb), + GFP_KERNEL); + if (ret < 0) { + pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", + ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + } else { + /* The backend is normal file, use fallocate */ + struct se_device *se_dev = cmd->se_dev; + loff_t pos = lba * se_dev->dev_attrib.block_size; + unsigned int len = nolb * se_dev->dev_attrib.block_size; + int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; + + if (!file->f_op->fallocate) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + ret = file->f_op->fallocate(file, mode, pos, len); + if (ret < 0) { + pr_warn("FILEIO: fallocate() failed: %d\n", ret); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + } + + return 0; +} + +static sense_reason_t +fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) +{ + struct se_device *dev = cmd->se_dev; + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *file = fd_dev->fd_file; + struct file *pfile = fd_dev->fd_prot_file; + sense_reason_t rc; + int ret = 0; + /* + * Call vectorized fileio functions to map struct scatterlist + * physical memory addresses to struct iovec virtual memory. + */ + if (data_direction == DMA_FROM_DEVICE) { + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { + ret = fd_do_rw(cmd, pfile, dev->prot_length, + cmd->t_prot_sg, cmd->t_prot_nents, + cmd->prot_length, 0); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size, + sgl, sgl_nents, cmd->data_length, 0); + + if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type && + dev->dev_attrib.pi_prot_verify) { + u32 sectors = cmd->data_length >> + ilog2(dev->dev_attrib.block_size); + + rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, + 0, cmd->t_prot_sg, 0); + if (rc) + return rc; + } + } else { + if (cmd->prot_type && dev->dev_attrib.pi_prot_type && + dev->dev_attrib.pi_prot_verify) { + u32 sectors = cmd->data_length >> + ilog2(dev->dev_attrib.block_size); + + rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, + 0, cmd->t_prot_sg, 0); + if (rc) + return rc; + } + + ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size, + sgl, sgl_nents, cmd->data_length, 1); + /* + * Perform implicit vfs_fsync_range() for fd_do_writev() ops + * for SCSI WRITEs with Forced Unit Access (FUA) set. + * Allow this to happen independent of WCE=0 setting. 
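+		 *
+		 * The flushed range below mirrors the write itself:
+		 * start = t_task_lba * block_size and
+		 * end = start + data_length - 1 (or LLONG_MAX when
+		 * data_length is zero).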
+ */ + if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) { + loff_t start = cmd->t_task_lba * + dev->dev_attrib.block_size; + loff_t end; + + if (cmd->data_length) + end = start + cmd->data_length - 1; + else + end = LLONG_MAX; + + vfs_fsync_range(fd_dev->fd_file, start, end, 1); + } + + if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { + ret = fd_do_rw(cmd, pfile, dev->prot_length, + cmd->t_prot_sg, cmd->t_prot_nents, + cmd->prot_length, 1); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + } + + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static sense_reason_t +fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) +{ + struct se_device *dev = cmd->se_dev; + struct fd_dev *fd_dev = FD_DEV(dev); + + /* + * We are currently limited by the number of iovecs (2048) per + * single vfs_[writev,readv] call. + */ + if (cmd->data_length > FD_MAX_BYTES) { + pr_err("FILEIO: Not able to process I/O of %u bytes due to" + "FD_MAX_BYTES: %u iovec count limitation\n", + cmd->data_length, FD_MAX_BYTES); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO) + return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction); + return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction); +} + +enum { + Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, + Opt_fd_async_io, Opt_err +}; + +static match_table_t tokens = { + {Opt_fd_dev_name, "fd_dev_name=%s"}, + {Opt_fd_dev_size, "fd_dev_size=%s"}, + {Opt_fd_buffered_io, "fd_buffered_io=%d"}, + {Opt_fd_async_io, "fd_async_io=%d"}, + {Opt_err, NULL} +}; + +static ssize_t fd_set_configfs_dev_params(struct se_device *dev, + const char *page, ssize_t count) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + char *orig, *ptr, *arg_p, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_fd_dev_name: + if (match_strlcpy(fd_dev->fd_dev_name, &args[0], + FD_MAX_DEV_NAME) == 0) { + ret = -EINVAL; + break; + } + pr_debug("FILEIO: Referencing Path: %s\n", + fd_dev->fd_dev_name); + fd_dev->fbd_flags |= FBDF_HAS_PATH; + break; + case Opt_fd_dev_size: + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoull() failed for" + " fd_dev_size=\n"); + goto out; + } + pr_debug("FILEIO: Referencing Size: %llu" + " bytes\n", fd_dev->fd_dev_size); + fd_dev->fbd_flags |= FBDF_HAS_SIZE; + break; + case Opt_fd_buffered_io: + ret = match_int(args, &arg); + if (ret) + goto out; + if (arg != 1) { + pr_err("bogus fd_buffered_io=%d value\n", arg); + ret = -EINVAL; + goto out; + } + + pr_debug("FILEIO: Using buffered I/O" + " operations for struct fd_dev\n"); + + fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE; + break; + case Opt_fd_async_io: + ret = match_int(args, &arg); + if (ret) + goto out; + if (arg != 1) { + pr_err("bogus fd_async_io=%d value\n", arg); + ret = -EINVAL; + goto out; + } + + pr_debug("FILEIO: Using async I/O" + " operations for struct fd_dev\n"); + + fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO; + break; + default: + break; + } + } + +out: + kfree(orig); + return (!ret) ? 
count : ret; +} + +static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + ssize_t bl = 0; + + bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); + bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s Async: %d\n", + fd_dev->fd_dev_name, fd_dev->fd_dev_size, + (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ? + "Buffered-WCE" : "O_DSYNC", + !!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)); + return bl; +} + +static sector_t fd_get_blocks(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *f = fd_dev->fd_file; + struct inode *i = f->f_mapping->host; + unsigned long long dev_size; + /* + * When using a file that references an underlying struct block_device, + * ensure dev_size is always based on the current inode size in order + * to handle underlying block_device resize operations. + */ + if (S_ISBLK(i->i_mode)) + dev_size = i_size_read(i); + else + dev_size = fd_dev->fd_dev_size; + + return div_u64(dev_size - dev->dev_attrib.block_size, + dev->dev_attrib.block_size); +} + +static int fd_init_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + struct file *prot_file, *file = fd_dev->fd_file; + struct inode *inode; + int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; + char buf[FD_MAX_DEV_PROT_NAME]; + + if (!file) { + pr_err("Unable to locate fd_dev->fd_file\n"); + return -ENODEV; + } + + inode = file->f_mapping->host; + if (S_ISBLK(inode->i_mode)) { + pr_err("FILEIO Protection emulation only supported on" + " !S_ISBLK\n"); + return -ENOSYS; + } + + if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) + flags &= ~O_DSYNC; + + snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", + fd_dev->fd_dev_name); + + prot_file = filp_open(buf, flags, 0600); + if (IS_ERR(prot_file)) { + pr_err("filp_open(%s) failed\n", buf); + ret = PTR_ERR(prot_file); + return ret; + } + fd_dev->fd_prot_file = prot_file; + + return 0; +} + +static int fd_format_prot(struct se_device *dev) +{ + unsigned char *buf; + int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; + int ret; + + if (!dev->dev_attrib.pi_prot_type) { + pr_err("Unable to format_prot while pi_prot_type == 0\n"); + return -ENODEV; + } + + buf = vzalloc(unit_size); + if (!buf) { + pr_err("Unable to allocate FILEIO prot buf\n"); + return -ENOMEM; + } + + pr_debug("Using FILEIO prot_length: %llu\n", + (unsigned long long)(dev->transport->get_blocks(dev) + 1) * + dev->prot_length); + + ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, + buf, unit_size); + vfree(buf); + return ret; +} + +static void fd_free_prot(struct se_device *dev) +{ + struct fd_dev *fd_dev = FD_DEV(dev); + + if (!fd_dev->fd_prot_file) + return; + + filp_close(fd_dev->fd_prot_file, NULL); + fd_dev->fd_prot_file = NULL; +} + +static struct exec_cmd_ops fd_exec_cmd_ops = { + .execute_rw = fd_execute_rw, + .execute_sync_cache = fd_execute_sync_cache, + .execute_write_same = fd_execute_write_same, + .execute_unmap = fd_execute_unmap, +}; + +static sense_reason_t +fd_parse_cdb(struct se_cmd *cmd) +{ + return sbc_parse_cdb(cmd, &fd_exec_cmd_ops); +} + +static const struct target_backend_ops fileio_ops = { + .name = "fileio", + .inquiry_prod = "FILEIO", + .inquiry_rev = FD_VERSION, + .owner = THIS_MODULE, + .attach_hba = fd_attach_hba, + .detach_hba = fd_detach_hba, + .alloc_device = fd_alloc_device, + .configure_device = fd_configure_device, + .destroy_device = fd_destroy_device, + .free_device = fd_free_device, + .configure_unmap = 
fd_configure_unmap, + .parse_cdb = fd_parse_cdb, + .set_configfs_dev_params = fd_set_configfs_dev_params, + .show_configfs_dev_params = fd_show_configfs_dev_params, + .get_device_type = sbc_get_device_type, + .get_blocks = fd_get_blocks, + .init_prot = fd_init_prot, + .format_prot = fd_format_prot, + .free_prot = fd_free_prot, + .tb_dev_attrib_attrs = sbc_attrib_attrs, +}; + +static int __init fileio_module_init(void) +{ + return transport_backend_register(&fileio_ops); +} + +static void __exit fileio_module_exit(void) +{ + target_backend_unregister(&fileio_ops); +} + +MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(fileio_module_init); +module_exit(fileio_module_exit); diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h new file mode 100644 index 0000000000..929b1ecd54 --- /dev/null +++ b/drivers/target/target_core_file.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_FILE_H +#define TARGET_CORE_FILE_H + +#include <target/target_core_base.h> + +#define FD_VERSION "4.0" + +#define FD_MAX_DEV_NAME 256 +#define FD_MAX_DEV_PROT_NAME FD_MAX_DEV_NAME + 16 +#define FD_DEVICE_QUEUE_DEPTH 32 +#define FD_MAX_DEVICE_QUEUE_DEPTH 128 +#define FD_BLOCKSIZE 512 +/* + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call + */ +#define FD_MAX_BYTES 8388608 + +#define RRF_EMULATE_CDB 0x01 +#define RRF_GOT_LBA 0x02 + +#define FBDF_HAS_PATH 0x01 +#define FBDF_HAS_SIZE 0x02 +#define FDBD_HAS_BUFFERED_IO_WCE 0x04 +#define FDBD_HAS_ASYNC_IO 0x08 +#define FDBD_FORMAT_UNIT_SIZE 2048 + +struct fd_dev { + struct se_device dev; + + u32 fbd_flags; + unsigned char fd_dev_name[FD_MAX_DEV_NAME]; + /* Unique Ramdisk Device ID in Ramdisk HBA */ + u32 fd_dev_id; + /* Number of SG tables in sg_table_array */ + u32 fd_table_count; + u32 fd_queue_depth; + u32 fd_block_size; + unsigned long long fd_dev_size; + struct file *fd_file; + struct file *fd_prot_file; + /* FILEIO HBA device is connected to */ + struct fd_host *fd_host; +} ____cacheline_aligned; + +struct fd_host { + u32 fd_host_dev_id_count; + /* Unique FILEIO Host ID */ + u32 fd_host_id; +} ____cacheline_aligned; + +#endif /* TARGET_CORE_FILE_H */ diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c new file mode 100644 index 0000000000..d508b343ba --- /dev/null +++ b/drivers/target/target_core_hba.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_hba.c + * + * This file contains the TCM HBA Transport related functions. + * + * (c) Copyright 2003-2013 Datera, Inc. + * + * Nicholas A. 
Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/net.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/in.h> +#include <linux/module.h> +#include <net/sock.h> +#include <net/tcp.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" + +static LIST_HEAD(backend_list); +static DEFINE_MUTEX(backend_mutex); + +static u32 hba_id_counter; + +static DEFINE_SPINLOCK(hba_lock); +static LIST_HEAD(hba_list); + + +int transport_backend_register(const struct target_backend_ops *ops) +{ + struct target_backend *tb, *old; + + tb = kzalloc(sizeof(*tb), GFP_KERNEL); + if (!tb) + return -ENOMEM; + tb->ops = ops; + + mutex_lock(&backend_mutex); + list_for_each_entry(old, &backend_list, list) { + if (!strcmp(old->ops->name, ops->name)) { + pr_err("backend %s already registered.\n", ops->name); + mutex_unlock(&backend_mutex); + kfree(tb); + return -EEXIST; + } + } + target_setup_backend_cits(tb); + list_add_tail(&tb->list, &backend_list); + mutex_unlock(&backend_mutex); + + pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n", + ops->name, ops->owner); + return 0; +} +EXPORT_SYMBOL(transport_backend_register); + +void target_backend_unregister(const struct target_backend_ops *ops) +{ + struct target_backend *tb; + + mutex_lock(&backend_mutex); + list_for_each_entry(tb, &backend_list, list) { + if (tb->ops == ops) { + list_del(&tb->list); + mutex_unlock(&backend_mutex); + /* + * Wait for any outstanding backend driver ->rcu_head + * callbacks to complete post TBO->free_device() -> + * call_rcu(), before allowing backend driver module + * unload of target_backend_ops->owner to proceed. 
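+			 *
+			 * (rcu_barrier() waits for all pending call_rcu()
+			 * callbacks to be invoked, which is stronger than
+			 * synchronize_rcu(): a grace period alone does not
+			 * guarantee the callbacks have run.)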
+ */ + rcu_barrier(); + kfree(tb); + return; + } + } + mutex_unlock(&backend_mutex); +} +EXPORT_SYMBOL(target_backend_unregister); + +static struct target_backend *core_get_backend(const char *name) +{ + struct target_backend *tb; + + mutex_lock(&backend_mutex); + list_for_each_entry(tb, &backend_list, list) { + if (!strcmp(tb->ops->name, name)) + goto found; + } + mutex_unlock(&backend_mutex); + return NULL; +found: + if (tb->ops->owner && !try_module_get(tb->ops->owner)) + tb = NULL; + mutex_unlock(&backend_mutex); + return tb; +} + +struct se_hba * +core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) +{ + struct se_hba *hba; + int ret = 0; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); + if (!hba) { + pr_err("Unable to allocate struct se_hba\n"); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&hba->device_lock); + mutex_init(&hba->hba_access_mutex); + + hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); + hba->hba_flags |= hba_flags; + + hba->backend = core_get_backend(plugin_name); + if (!hba->backend) { + ret = -EINVAL; + goto out_free_hba; + } + + ret = hba->backend->ops->attach_hba(hba, plugin_dep_id); + if (ret < 0) + goto out_module_put; + + spin_lock(&hba_lock); + hba->hba_id = hba_id_counter++; + list_add_tail(&hba->hba_node, &hba_list); + spin_unlock(&hba_lock); + + pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target" + " Core\n", hba->hba_id); + + return hba; + +out_module_put: + module_put(hba->backend->ops->owner); + hba->backend = NULL; +out_free_hba: + kfree(hba); + return ERR_PTR(ret); +} + +int +core_delete_hba(struct se_hba *hba) +{ + WARN_ON(hba->dev_count); + + hba->backend->ops->detach_hba(hba); + + spin_lock(&hba_lock); + list_del(&hba->hba_node); + spin_unlock(&hba_lock); + + pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" + " Core\n", hba->hba_id); + + module_put(hba->backend->ops->owner); + + hba->backend = NULL; + kfree(hba); + return 0; +} + +bool target_sense_desc_format(struct se_device *dev) +{ + return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false; +} diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c new file mode 100644 index 0000000000..a6a06a5f74 --- /dev/null +++ b/drivers/target/target_core_iblock.c @@ -0,0 +1,1196 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_iblock.c + * + * This file contains the Storage Engine <-> Linux BlockIO transport + * specific functions. + * + * (c) Copyright 2003-2013 Datera, Inc. + * + * Nicholas A. 
Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/string.h> +#include <linux/parser.h> +#include <linux/timer.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include <linux/blk-integrity.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/bio.h> +#include <linux/file.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/pr.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> +#include <asm/unaligned.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> + +#include "target_core_iblock.h" +#include "target_core_pr.h" + +#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ +#define IBLOCK_BIO_POOL_SIZE 128 + +static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev) +{ + return container_of(dev, struct iblock_dev, dev); +} + + +static int iblock_attach_hba(struct se_hba *hba, u32 host_id) +{ + pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" + " Generic Target Core Stack %s\n", hba->hba_id, + IBLOCK_VERSION, TARGET_CORE_VERSION); + return 0; +} + +static void iblock_detach_hba(struct se_hba *hba) +{ +} + +static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name) +{ + struct iblock_dev *ib_dev = NULL; + + ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); + if (!ib_dev) { + pr_err("Unable to allocate struct iblock_dev\n"); + return NULL; + } + + ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug), + GFP_KERNEL); + if (!ib_dev->ibd_plug) + goto free_dev; + + pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); + + return &ib_dev->dev; + +free_dev: + kfree(ib_dev); + return NULL; +} + +static bool iblock_configure_unmap(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + + return target_configure_unmap_from_queue(&dev->dev_attrib, + ib_dev->ibd_bd); +} + +static int iblock_configure_device(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct request_queue *q; + struct block_device *bd = NULL; + struct blk_integrity *bi; + blk_mode_t mode = BLK_OPEN_READ; + unsigned int max_write_zeroes_sectors; + int ret; + + if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { + pr_err("Missing udev_path= parameters for IBLOCK\n"); + return -EINVAL; + } + + ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); + if (ret) { + pr_err("IBLOCK: Unable to create bioset\n"); + goto out; + } + + pr_debug( "IBLOCK: Claiming struct block_device: %s\n", + ib_dev->ibd_udev_path); + + if (!ib_dev->ibd_readonly) + mode |= BLK_OPEN_WRITE; + else + dev->dev_flags |= DF_READ_ONLY; + + bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL); + if (IS_ERR(bd)) { + ret = PTR_ERR(bd); + goto out_free_bioset; + } + ib_dev->ibd_bd = bd; + + q = bdev_get_queue(bd); + + dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); + dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q), + SECTOR_SIZE, + dev->dev_attrib.hw_block_size); + dev->dev_attrib.hw_queue_depth = q->nr_requests; + + /* + * Enable write same emulation for IBLOCK and use 0xFFFF as + * the smaller WRITE_SAME(10) only has a two-byte block count. 
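+	 *
+	 * (The 0xFFFF cap reflects the 16-bit NUMBER OF LOGICAL BLOCKS
+	 * field in the WRITE SAME (10) CDB; bdev_write_zeroes_sectors()
+	 * below overrides it when the device reports write-zeroes support.)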
+ */ + max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd); + if (max_write_zeroes_sectors) + dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors; + else + dev->dev_attrib.max_write_same_len = 0xFFFF; + + if (bdev_nonrot(bd)) + dev->dev_attrib.is_nonrot = 1; + + bi = bdev_get_integrity(bd); + if (bi) { + struct bio_set *bs = &ib_dev->ibd_bio_set; + + if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") || + !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) { + pr_err("IBLOCK export of blk_integrity: %s not" + " supported\n", bi->profile->name); + ret = -ENOSYS; + goto out_blkdev_put; + } + + if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; + } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) { + dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; + } + + if (dev->dev_attrib.pi_prot_type) { + if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { + pr_err("Unable to allocate bioset for PI\n"); + ret = -ENOMEM; + goto out_blkdev_put; + } + pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", + &bs->bio_integrity_pool); + } + dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; + } + + return 0; + +out_blkdev_put: + blkdev_put(ib_dev->ibd_bd, ib_dev); +out_free_bioset: + bioset_exit(&ib_dev->ibd_bio_set); +out: + return ret; +} + +static void iblock_dev_call_rcu(struct rcu_head *p) +{ + struct se_device *dev = container_of(p, struct se_device, rcu_head); + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + + kfree(ib_dev->ibd_plug); + kfree(ib_dev); +} + +static void iblock_free_device(struct se_device *dev) +{ + call_rcu(&dev->rcu_head, iblock_dev_call_rcu); +} + +static void iblock_destroy_device(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + + if (ib_dev->ibd_bd != NULL) + blkdev_put(ib_dev->ibd_bd, ib_dev); + bioset_exit(&ib_dev->ibd_bio_set); +} + +static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev); + struct iblock_dev_plug *ib_dev_plug; + + /* + * Each se_device has a per cpu work this can be run from. We + * shouldn't have multiple threads on the same cpu calling this + * at the same time. 
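+	 *
+	 * (ibd_plug was allocated with nr_cpu_ids entries in
+	 * iblock_alloc_device(), so the raw_smp_processor_id() index
+	 * below is always in bounds.)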
+ */ + ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()]; + if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags)) + return NULL; + + blk_start_plug(&ib_dev_plug->blk_plug); + return &ib_dev_plug->se_plug; +} + +static void iblock_unplug_device(struct se_dev_plug *se_plug) +{ + struct iblock_dev_plug *ib_dev_plug = container_of(se_plug, + struct iblock_dev_plug, se_plug); + + blk_finish_plug(&ib_dev_plug->blk_plug); + clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags); +} + +static sector_t iblock_get_blocks(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd); + unsigned long long blocks_long = + div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1; + + if (block_size == dev->dev_attrib.block_size) + return blocks_long; + + switch (block_size) { + case 4096: + switch (dev->dev_attrib.block_size) { + case 2048: + blocks_long <<= 1; + break; + case 1024: + blocks_long <<= 2; + break; + case 512: + blocks_long <<= 3; + break; + default: + break; + } + break; + case 2048: + switch (dev->dev_attrib.block_size) { + case 4096: + blocks_long >>= 1; + break; + case 1024: + blocks_long <<= 1; + break; + case 512: + blocks_long <<= 2; + break; + default: + break; + } + break; + case 1024: + switch (dev->dev_attrib.block_size) { + case 4096: + blocks_long >>= 2; + break; + case 2048: + blocks_long >>= 1; + break; + case 512: + blocks_long <<= 1; + break; + default: + break; + } + break; + case 512: + switch (dev->dev_attrib.block_size) { + case 4096: + blocks_long >>= 3; + break; + case 2048: + blocks_long >>= 2; + break; + case 1024: + blocks_long >>= 1; + break; + default: + break; + } + break; + default: + break; + } + + return blocks_long; +} + +static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status) +{ + struct iblock_req *ibr = cmd->priv; + u8 status; + + if (!refcount_dec_and_test(&ibr->pending)) + return; + + if (blk_status == BLK_STS_RESV_CONFLICT) + status = SAM_STAT_RESERVATION_CONFLICT; + else if (atomic_read(&ibr->ib_bio_err_cnt)) + status = SAM_STAT_CHECK_CONDITION; + else + status = SAM_STAT_GOOD; + + target_complete_cmd(cmd, status); + kfree(ibr); +} + +static void iblock_bio_done(struct bio *bio) +{ + struct se_cmd *cmd = bio->bi_private; + struct iblock_req *ibr = cmd->priv; + blk_status_t blk_status = bio->bi_status; + + if (bio->bi_status) { + pr_err("bio error: %p, err: %d\n", bio, bio->bi_status); + /* + * Bump the ib_bio_err_cnt and release bio. + */ + atomic_inc(&ibr->ib_bio_err_cnt); + smp_mb__after_atomic(); + } + + bio_put(bio); + + iblock_complete_cmd(cmd, blk_status); +} + +static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, + blk_opf_t opf) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); + struct bio *bio; + + /* + * Only allocate as many vector entries as the bio code allows us to, + * we'll loop later on until we have handled the whole request. + */ + bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf, + GFP_NOIO, &ib_dev->ibd_bio_set); + if (!bio) { + pr_err("Unable to allocate memory for bio\n"); + return NULL; + } + + bio->bi_private = cmd; + bio->bi_end_io = &iblock_bio_done; + bio->bi_iter.bi_sector = lba; + + return bio; +} + +static void iblock_submit_bios(struct bio_list *list) +{ + struct blk_plug plug; + struct bio *bio; + /* + * The block layer handles nested plugs, so just plug/unplug to handle + * fabric drivers that didn't support batching and multi bio cmds. 
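+	 *
+	 * (blk_start_plug()/blk_finish_plug() batch the submit_bio()
+	 * calls in the loop below, letting the block layer merge and
+	 * dispatch them together.)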
+ */
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(bio);
+	blk_finish_plug(&plug);
+}
+
+static void iblock_end_io_flush(struct bio *bio)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (bio->bi_status)
+		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
+
+	if (cmd) {
+		if (bio->bi_status)
+			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+		else
+			target_complete_cmd(cmd, SAM_STAT_GOOD);
+	}
+
+	bio_put(bio);
+}
+
+/*
+ * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
+ * always flush the whole cache.
+ */
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	struct bio *bio;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op.
+	 */
+	if (immed)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+
+	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+			GFP_KERNEL);
+	bio->bi_end_io = iblock_end_io_flush;
+	if (!immed)
+		bio->bi_private = cmd;
+	submit_bio(bio);
+	return 0;
+}
+
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+	struct se_device *dev = cmd->se_dev;
+	int ret;
+
+	ret = blkdev_issue_discard(bdev,
+				   target_to_linux_sector(dev, lba),
+				   target_to_linux_sector(dev, nolb),
+				   GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("blkdev_issue_discard() failed: %d\n", ret);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *sg = &cmd->t_data_sg[0];
+	unsigned char *buf, *not_zero;
+	int ret;
+
+	buf = kmap(sg_page(sg)) + sg->offset;
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	/*
+	 * Fall back to iblock_execute_write_same() slow-path if
+	 * incoming WRITE_SAME payload does not contain zeros.
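+	 *
+	 * (memchr_inv() returns a pointer to the first byte that is not
+	 * 0x00, or NULL when the whole payload is zeroes.)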
+ */ + not_zero = memchr_inv(buf, 0x00, cmd->data_length); + kunmap(sg_page(sg)); + + if (not_zero) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + ret = blkdev_issue_zeroout(bdev, + target_to_linux_sector(dev, cmd->t_task_lba), + target_to_linux_sector(dev, + sbc_get_write_same_sectors(cmd)), + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); + if (ret) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static sense_reason_t +iblock_execute_write_same(struct se_cmd *cmd) +{ + struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; + struct iblock_req *ibr; + struct scatterlist *sg; + struct bio *bio; + struct bio_list list; + struct se_device *dev = cmd->se_dev; + sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + sector_t sectors = target_to_linux_sector(dev, + sbc_get_write_same_sectors(cmd)); + + if (cmd->prot_op) { + pr_err("WRITE_SAME: Protection information with IBLOCK" + " backends not supported\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + if (!cmd->t_data_nents) + return TCM_INVALID_CDB_FIELD; + + sg = &cmd->t_data_sg[0]; + + if (cmd->t_data_nents > 1 || + sg->length != cmd->se_dev->dev_attrib.block_size) { + pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" + " block_size: %u\n", cmd->t_data_nents, sg->length, + cmd->se_dev->dev_attrib.block_size); + return TCM_INVALID_CDB_FIELD; + } + + if (bdev_write_zeroes_sectors(bdev)) { + if (!iblock_execute_zero_out(bdev, cmd)) + return 0; + } + + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; + cmd->priv = ibr; + + bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE); + if (!bio) + goto fail_free_ibr; + + bio_list_init(&list); + bio_list_add(&list, bio); + + refcount_set(&ibr->pending, 1); + + while (sectors) { + while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) + != sg->length) { + + bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE); + if (!bio) + goto fail_put_bios; + + refcount_inc(&ibr->pending); + bio_list_add(&list, bio); + } + + /* Always in 512 byte units for Linux/Block */ + block_lba += sg->length >> SECTOR_SHIFT; + sectors -= sg->length >> SECTOR_SHIFT; + } + + iblock_submit_bios(&list); + return 0; + +fail_put_bios: + while ((bio = bio_list_pop(&list))) + bio_put(bio); +fail_free_ibr: + kfree(ibr); +fail: + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +} + +enum { + Opt_udev_path, Opt_readonly, Opt_force, Opt_err +}; + +static match_table_t tokens = { + {Opt_udev_path, "udev_path=%s"}, + {Opt_readonly, "readonly=%d"}, + {Opt_force, "force=%d"}, + {Opt_err, NULL} +}; + +static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, + const char *page, ssize_t count) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + char *orig, *ptr, *arg_p, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, token; + unsigned long tmp_readonly; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_udev_path: + if (ib_dev->ibd_bd) { + pr_err("Unable to set udev_path= while" + " ib_dev->ibd_bd exists\n"); + ret = -EEXIST; + goto out; + } + if (match_strlcpy(ib_dev->ibd_udev_path, &args[0], + SE_UDEV_PATH_LEN) == 0) { + ret = -EINVAL; + break; + } + pr_debug("IBLOCK: Referencing UDEV path: %s\n", + ib_dev->ibd_udev_path); + ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; + break; + case Opt_readonly: + arg_p = 
match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtoul(arg_p, 0, &tmp_readonly); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for" + " readonly=\n"); + goto out; + } + ib_dev->ibd_readonly = tmp_readonly; + pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly); + break; + case Opt_force: + break; + default: + break; + } + } + +out: + kfree(orig); + return (!ret) ? count : ret; +} + +static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + ssize_t bl = 0; + + if (bd) + bl += sprintf(b + bl, "iBlock device: %pg", bd); + if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH) + bl += sprintf(b + bl, " UDEV PATH: %s", + ib_dev->ibd_udev_path); + bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly); + + bl += sprintf(b + bl, " "); + if (bd) { + bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", + MAJOR(bd->bd_dev), MINOR(bd->bd_dev), + "CLAIMED: IBLOCK"); + } else { + bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); + } + + return bl; +} + +static int +iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, + struct sg_mapping_iter *miter) +{ + struct se_device *dev = cmd->se_dev; + struct blk_integrity *bi; + struct bio_integrity_payload *bip; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + int rc; + size_t resid, len; + + bi = bdev_get_integrity(ib_dev->ibd_bd); + if (!bi) { + pr_err("Unable to locate bio_integrity\n"); + return -ENODEV; + } + + bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents)); + if (IS_ERR(bip)) { + pr_err("Unable to allocate bio_integrity_payload\n"); + return PTR_ERR(bip); + } + + /* virtual start sector must be in integrity interval units */ + bip_set_seed(bip, bio->bi_iter.bi_sector >> + (bi->interval_exp - SECTOR_SHIFT)); + + pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, + (unsigned long long)bip->bip_iter.bi_sector); + + resid = bio_integrity_bytes(bi, bio_sectors(bio)); + while (resid > 0 && sg_miter_next(miter)) { + + len = min_t(size_t, miter->length, resid); + rc = bio_integrity_add_page(bio, miter->page, len, + offset_in_page(miter->addr)); + if (rc != len) { + pr_err("bio_integrity_add_page() failed; %d\n", rc); + sg_miter_stop(miter); + return -ENOMEM; + } + + pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n", + miter->page, len, offset_in_page(miter->addr)); + + resid -= len; + if (len < miter->length) + miter->consumed -= miter->length - len; + } + sg_miter_stop(miter); + + return 0; +} + +static sense_reason_t +iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) +{ + struct se_device *dev = cmd->se_dev; + sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + struct iblock_req *ibr; + struct bio *bio; + struct bio_list list; + struct scatterlist *sg; + u32 sg_num = sgl_nents; + blk_opf_t opf; + unsigned bio_cnt; + int i, rc; + struct sg_mapping_iter prot_miter; + unsigned int miter_dir; + + if (data_direction == DMA_TO_DEVICE) { + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + + /* + * Set bits to indicate WRITE_ODIRECT so we are not throttled + * by WBT. + */ + opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; + /* + * Force writethrough using REQ_FUA if a volatile write cache + * is not enabled, or if initiator set the Force Unit Access bit. 
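+	 *
+	 * If the device does not advertise FUA at all, the whole branch
+	 * below is skipped and the write goes out without REQ_FUA;
+	 * durability then presumably relies on a later SYNCHRONIZE CACHE.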
+ */ + miter_dir = SG_MITER_TO_SG; + if (bdev_fua(ib_dev->ibd_bd)) { + if (cmd->se_cmd_flags & SCF_FUA) + opf |= REQ_FUA; + else if (!bdev_write_cache(ib_dev->ibd_bd)) + opf |= REQ_FUA; + } + } else { + opf = REQ_OP_READ; + miter_dir = SG_MITER_FROM_SG; + } + + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; + cmd->priv = ibr; + + if (!sgl_nents) { + refcount_set(&ibr->pending, 1); + iblock_complete_cmd(cmd, BLK_STS_OK); + return 0; + } + + bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf); + if (!bio) + goto fail_free_ibr; + + bio_list_init(&list); + bio_list_add(&list, bio); + + refcount_set(&ibr->pending, 2); + bio_cnt = 1; + + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) + sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents, + miter_dir); + + for_each_sg(sgl, sg, sgl_nents, i) { + /* + * XXX: if the length the device accepts is shorter than the + * length of the S/G list entry this will cause and + * endless loop. Better hope no driver uses huge pages. + */ + while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) + != sg->length) { + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { + rc = iblock_alloc_bip(cmd, bio, &prot_miter); + if (rc) + goto fail_put_bios; + } + + if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { + iblock_submit_bios(&list); + bio_cnt = 0; + } + + bio = iblock_get_bio(cmd, block_lba, sg_num, opf); + if (!bio) + goto fail_put_bios; + + refcount_inc(&ibr->pending); + bio_list_add(&list, bio); + bio_cnt++; + } + + /* Always in 512 byte units for Linux/Block */ + block_lba += sg->length >> SECTOR_SHIFT; + sg_num--; + } + + if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { + rc = iblock_alloc_bip(cmd, bio, &prot_miter); + if (rc) + goto fail_put_bios; + } + + iblock_submit_bios(&list); + iblock_complete_cmd(cmd, BLK_STS_OK); + return 0; + +fail_put_bios: + while ((bio = bio_list_pop(&list))) + bio_put(bio); +fail_free_ibr: + kfree(ibr); +fail: + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +} + +static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key, + u64 sa_key, u8 type, bool aptpl) +{ + struct se_device *dev = cmd->se_dev; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bdev = ib_dev->ibd_bd; + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; + int ret; + + if (!ops) { + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + switch (sa) { + case PRO_REGISTER: + case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: + if (!ops->pr_register) { + pr_err("block device does not support pr_register.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + /* The block layer pr ops always enables aptpl */ + if (!aptpl) + pr_info("APTPL not set by initiator, but will be used.\n"); + + ret = ops->pr_register(bdev, key, sa_key, + sa == PRO_REGISTER ? 
0 : PR_FL_IGNORE_KEY);
+		break;
+	case PRO_RESERVE:
+		if (!ops->pr_reserve) {
+			pr_err("block_device does not support pr_reserve.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
+		break;
+	case PRO_CLEAR:
+		if (!ops->pr_clear) {
+			pr_err("block_device does not support pr_clear.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_clear(bdev, key);
+		break;
+	case PRO_PREEMPT:
+	case PRO_PREEMPT_AND_ABORT:
+		if (!ops->pr_preempt) {
+			pr_err("block_device does not support pr_preempt.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_preempt(bdev, key, sa_key,
+				      scsi_pr_type_to_block(type),
+				      sa == PRO_PREEMPT_AND_ABORT);
+		break;
+	case PRO_RELEASE:
+		if (!ops->pr_release) {
+			pr_err("block_device does not support pr_release.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (!ret)
+		return TCM_NO_SENSE;
+	else if (ret == PR_STS_RESERVATION_CONFLICT)
+		return TCM_RESERVATION_CONFLICT;
+	else
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+static void iblock_pr_report_caps(unsigned char *param_data)
+{
+	u16 len = 8;
+
+	put_unaligned_be16(len, &param_data[0]);
+	/*
+	 * When using the pr_ops passthrough method we only support exporting
+	 * the device through one target port because from the backend module
+	 * level we can't see the target port config. As a result we only
+	 * support registration directly from the I_T nexus the cmd is sent
+	 * through and do not set ATP_C here.
+	 *
+	 * The block layer pr_ops do not support passing in initiators so
+	 * we don't set SIP_C here.
+	 */
+	/* PTPL_C: Persistence across Target Power Loss bit */
+	param_data[2] |= 0x01;
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Task Mask Valid bit.
+	 */
+	param_data[3] |= 0x80;
+	/*
+	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+	 */
+	param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
+	 * layer pr ops always enable this, so report it as active.
+	 */
+	param_data[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
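+	 *
+	 * With every bit below set, the capabilities payload built here
+	 * reads: param_data[2] = 0x01, param_data[3] = 0x91,
+	 * param_data[4] = 0xea and param_data[5] = 0x01.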
+ */
+	param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+	param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+}
+
+static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
+		unsigned char *param_data)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bdev = ib_dev->ibd_bd;
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	int i, len, paths, data_offset;
+	struct pr_keys *keys;
+	sense_reason_t ret;
+
+	if (!ops) {
+		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (!ops->pr_read_keys) {
+		pr_err("Block device does not support read_keys.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	/*
+	 * We don't know what's under us, but dm-multipath will register every
+	 * path with the same key, so start off with enough space for 16 paths,
+	 * which is not a lot of memory and should normally be enough.
+	 */
+	paths = 16;
+retry:
+	len = 8 * paths;
+	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
+	if (!keys)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	keys->num_keys = paths;
+	if (!ops->pr_read_keys(bdev, keys)) {
+		if (keys->num_keys > paths) {
+			kfree(keys);
+			paths *= 2;
+			goto retry;
+		}
+	} else {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto free_keys;
+	}
+
+	ret = TCM_NO_SENSE;
+
+	put_unaligned_be32(keys->generation, &param_data[0]);
+	if (!keys->num_keys) {
+		put_unaligned_be32(0, &param_data[4]);
+		goto free_keys;
+	}
+
+	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
+
+	data_offset = 8;
+	for (i = 0; i < keys->num_keys; i++) {
+		if (data_offset + 8 > cmd->data_length)
+			break;
+
+		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
+		data_offset += 8;
+	}
+
+free_keys:
+	kfree(keys);
+	return ret;
+}
+
+static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
+		unsigned char *param_data)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bdev = ib_dev->ibd_bd;
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	struct pr_held_reservation rsv = { };
+
+	if (!ops) {
+		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (!ops->pr_read_reservation) {
+		pr_err("Block device does not support read_reservation.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (ops->pr_read_reservation(bdev, &rsv))
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	put_unaligned_be32(rsv.generation, &param_data[0]);
+	if (!block_pr_type_to_scsi(rsv.type)) {
+		put_unaligned_be32(0, &param_data[4]);
+		return TCM_NO_SENSE;
+	}
+
+	put_unaligned_be32(16, &param_data[4]);
+
+	if (cmd->data_length < 16)
+		return TCM_NO_SENSE;
+	put_unaligned_be64(rsv.key, &param_data[8]);
+
+	if (cmd->data_length < 22)
+		return TCM_NO_SENSE;
+	param_data[21] = block_pr_type_to_scsi(rsv.type);
+
+	return TCM_NO_SENSE;
+}
+
+static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
+		unsigned char *param_data)
+{
+	sense_reason_t ret = TCM_NO_SENSE;
+
+	switch (sa) {
+	case PRI_REPORT_CAPABILITIES:
+		iblock_pr_report_caps(param_data);
+		break;
+	case PRI_READ_KEYS:
+		ret = iblock_pr_read_keys(cmd,
param_data); + break; + case PRI_READ_RESERVATION: + ret = iblock_pr_read_reservation(cmd, param_data); + break; + default: + pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + return ret; +} + +static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int ret; + + ret = bdev_alignment_offset(bd); + if (ret == -1) + return 0; + + /* convert offset-bytes to offset-lbas */ + return ret / bdev_logical_block_size(bd); +} + +static unsigned int iblock_get_lbppbe(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + unsigned int logs_per_phys = + bdev_physical_block_size(bd) / bdev_logical_block_size(bd); + + return ilog2(logs_per_phys); +} + +static unsigned int iblock_get_io_min(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_min(bd); +} + +static unsigned int iblock_get_io_opt(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_opt(bd); +} + +static struct exec_cmd_ops iblock_exec_cmd_ops = { + .execute_rw = iblock_execute_rw, + .execute_sync_cache = iblock_execute_sync_cache, + .execute_write_same = iblock_execute_write_same, + .execute_unmap = iblock_execute_unmap, + .execute_pr_out = iblock_execute_pr_out, + .execute_pr_in = iblock_execute_pr_in, +}; + +static sense_reason_t +iblock_parse_cdb(struct se_cmd *cmd) +{ + return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops); +} + +static bool iblock_get_write_cache(struct se_device *dev) +{ + return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd); +} + +static const struct target_backend_ops iblock_ops = { + .name = "iblock", + .inquiry_prod = "IBLOCK", + .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR, + .inquiry_rev = IBLOCK_VERSION, + .owner = THIS_MODULE, + .attach_hba = iblock_attach_hba, + .detach_hba = iblock_detach_hba, + .alloc_device = iblock_alloc_device, + .configure_device = iblock_configure_device, + .destroy_device = iblock_destroy_device, + .free_device = iblock_free_device, + .configure_unmap = iblock_configure_unmap, + .plug_device = iblock_plug_device, + .unplug_device = iblock_unplug_device, + .parse_cdb = iblock_parse_cdb, + .set_configfs_dev_params = iblock_set_configfs_dev_params, + .show_configfs_dev_params = iblock_show_configfs_dev_params, + .get_device_type = sbc_get_device_type, + .get_blocks = iblock_get_blocks, + .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, + .get_lbppbe = iblock_get_lbppbe, + .get_io_min = iblock_get_io_min, + .get_io_opt = iblock_get_io_opt, + .get_write_cache = iblock_get_write_cache, + .tb_dev_attrib_attrs = sbc_attrib_attrs, +}; + +static int __init iblock_module_init(void) +{ + return transport_backend_register(&iblock_ops); +} + +static void __exit iblock_module_exit(void) +{ + target_backend_unregister(&iblock_ops); +} + +MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(iblock_module_init); +module_exit(iblock_module_exit); diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h new file mode 100644 index 0000000000..8c55375d2f --- /dev/null +++ b/drivers/target/target_core_iblock.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_IBLOCK_H 
+#define TARGET_CORE_IBLOCK_H + +#include <linux/atomic.h> +#include <linux/refcount.h> +#include <linux/blkdev.h> +#include <target/target_core_base.h> + +#define IBLOCK_VERSION "4.0" + +#define IBLOCK_MAX_CDBS 16 + +struct iblock_req { + refcount_t pending; + atomic_t ib_bio_err_cnt; +} ____cacheline_aligned; + +#define IBDF_HAS_UDEV_PATH 0x01 + +#define IBD_PLUGF_PLUGGED 0x01 + +struct iblock_dev_plug { + struct se_dev_plug se_plug; + struct blk_plug blk_plug; + unsigned long flags; +}; + +struct iblock_dev { + struct se_device dev; + unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; + u32 ibd_flags; + struct bio_set ibd_bio_set; + struct block_device *ibd_bd; + bool ibd_readonly; + struct iblock_dev_plug *ibd_plug; +} ____cacheline_aligned; + +#endif /* TARGET_CORE_IBLOCK_H */ diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h new file mode 100644 index 0000000000..408be26d2e --- /dev/null +++ b/drivers/target/target_core_internal.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_INTERNAL_H +#define TARGET_CORE_INTERNAL_H + +#include <linux/configfs.h> +#include <linux/list.h> +#include <linux/types.h> +#include <target/target_core_base.h> + +#define TARGET_CORE_NAME_MAX_LEN 64 +#define TARGET_FABRIC_NAME_SIZE 32 + +struct target_backend { + struct list_head list; + + const struct target_backend_ops *ops; + + struct config_item_type tb_dev_cit; + struct config_item_type tb_dev_attrib_cit; + struct config_item_type tb_dev_action_cit; + struct config_item_type tb_dev_pr_cit; + struct config_item_type tb_dev_wwn_cit; + struct config_item_type tb_dev_alua_tg_pt_gps_cit; + struct config_item_type tb_dev_stat_cit; +}; + +struct target_fabric_configfs { + atomic_t tf_access_cnt; + struct list_head tf_list; + struct config_group tf_group; + struct config_group tf_disc_group; + const struct target_core_fabric_ops *tf_ops; + + struct config_item_type tf_discovery_cit; + struct config_item_type tf_wwn_cit; + struct config_item_type tf_wwn_fabric_stats_cit; + struct config_item_type tf_wwn_param_cit; + struct config_item_type tf_tpg_cit; + struct config_item_type tf_tpg_base_cit; + struct config_item_type tf_tpg_lun_cit; + struct config_item_type tf_tpg_port_cit; + struct config_item_type tf_tpg_port_stat_cit; + struct config_item_type tf_tpg_np_cit; + struct config_item_type tf_tpg_np_base_cit; + struct config_item_type tf_tpg_attrib_cit; + struct config_item_type tf_tpg_auth_cit; + struct config_item_type tf_tpg_param_cit; + struct config_item_type tf_tpg_nacl_cit; + struct config_item_type tf_tpg_nacl_base_cit; + struct config_item_type tf_tpg_nacl_attrib_cit; + struct config_item_type tf_tpg_nacl_auth_cit; + struct config_item_type tf_tpg_nacl_param_cit; + struct config_item_type tf_tpg_nacl_stat_cit; + struct config_item_type tf_tpg_mappedlun_cit; + struct config_item_type tf_tpg_mappedlun_stat_cit; +}; + +/* target_core_alua.c */ +extern struct t10_alua_lu_gp *default_lu_gp; + +/* target_core_device.c */ +struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); +void target_pr_kref_release(struct kref *); +void core_free_device_list_for_node(struct se_node_acl *, + struct se_portal_group *); +void core_update_device_list_access(u64, bool, struct se_node_acl *); +struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64); +int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, + u64, bool, struct se_node_acl *, struct se_portal_group *); +void 
core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *, + struct se_node_acl *, struct se_portal_group *); +void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); +int core_dev_add_lun(struct se_portal_group *, struct se_device *, + struct se_lun *lun); +void core_dev_del_lun(struct se_portal_group *, struct se_lun *); +struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, + struct se_node_acl *, u64, int *); +int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun_acl *, struct se_lun *lun, bool); +int core_dev_del_initiator_node_lun_acl(struct se_lun *, + struct se_lun_acl *); +void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun_acl *lacl); +int core_dev_setup_virtual_lun0(void); +void core_dev_release_virtual_lun0(void); +struct se_device *target_alloc_device(struct se_hba *hba, const char *name); +int target_configure_device(struct se_device *dev); +void target_free_device(struct se_device *); +int target_for_each_device(int (*fn)(struct se_device *dev, void *data), + void *data); +void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq); + +/* target_core_configfs.c */ +extern struct configfs_item_operations target_core_dev_item_ops; +void target_setup_backend_cits(struct target_backend *); + +/* target_core_fabric_configfs.c */ +int target_fabric_setup_cits(struct target_fabric_configfs *); + +/* target_core_fabric_lib.c */ +int target_get_pr_transport_id_len(struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, int *format_code); +int target_get_pr_transport_id(struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, int *format_code, + unsigned char *buf); +const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, + char *buf, u32 *out_tid_len, char **port_nexus_ptr); + +/* target_core_hba.c */ +struct se_hba *core_alloc_hba(const char *, u32, u32); +int core_delete_hba(struct se_hba *); + +/* target_core_tmr.c */ +void core_tmr_abort_task(struct se_device *, struct se_tmr_req *, + struct se_session *); +int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, + struct list_head *, struct se_cmd *); + +/* target_core_tpg.c */ +extern struct se_device *g_lun0_dev; + +struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, + const char *); +void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *, + struct se_lun *); +void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); +struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64); +int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, + bool, struct se_device *); +void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); +struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg, + const char *initiatorname); +void core_tpg_del_initiator_node_acl(struct se_node_acl *acl); +int target_tpg_enable(struct se_portal_group *se_tpg); +int target_tpg_disable(struct se_portal_group *se_tpg); + +/* target_core_transport.c */ +int init_se_kmem_caches(void); +void release_se_kmem_caches(void); +u32 scsi_get_new_index(scsi_index_t); +void transport_subsystem_check_init(void); +unsigned char *transport_dump_cmd_direction(struct se_cmd *); +void transport_dump_dev_state(struct se_device *, char *, int *); +void transport_dump_dev_info(struct se_device *, struct se_lun *, + unsigned long long, char *, int *); +void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, 
int); +int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); +int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); +int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); +void transport_clear_lun_ref(struct se_lun *); +sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); +void target_qf_do_work(struct work_struct *work); +void target_do_delayed_work(struct work_struct *work); +bool target_check_wce(struct se_device *dev); +bool target_check_fua(struct se_device *dev); +void __target_execute_cmd(struct se_cmd *, bool); +void target_queued_submit_work(struct work_struct *work); + +/* target_core_stat.c */ +void target_stat_setup_dev_default_groups(struct se_device *); +void target_stat_setup_port_default_groups(struct se_lun *); +void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); + +/* target_core_xcopy.c */ +extern struct se_portal_group xcopy_pt_tpg; + +/* target_core_configfs.c */ +#define DB_ROOT_LEN 4096 +#define DB_ROOT_DEFAULT "/var/target" +#define DB_ROOT_PREFERRED "/etc/target" + +extern char db_root[]; + +#endif /* TARGET_CORE_INTERNAL_H */ diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c new file mode 100644 index 0000000000..49d9167bb2 --- /dev/null +++ b/drivers/target/target_core_pr.c @@ -0,0 +1,4187 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_pr.c + * + * This file contains SPC-3 compliant persistent reservations and + * legacy SPC-2 reservations with compatible reservation handling (CRH=1) + * + * (c) Copyright 2009-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/vmalloc.h> +#include <linux/file.h> +#include <linux/fcntl.h> +#include <linux/fs.h> +#include <scsi/scsi_proto.h> +#include <asm/unaligned.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +/* + * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT) + */ +struct pr_transport_id_holder { + struct t10_pr_registration *dest_pr_reg; + struct se_portal_group *dest_tpg; + struct se_node_acl *dest_node_acl; + struct se_dev_entry *dest_se_deve; + struct list_head dest_list; +}; + +void core_pr_dump_initiator_port( + struct t10_pr_registration *pr_reg, + char *buf, + u32 size) +{ + if (!pr_reg->isid_present_at_reg) { + buf[0] = '\0'; + return; + } + + snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid); +} + +enum register_type { + REGISTER, + REGISTER_AND_IGNORE_EXISTING_KEY, + REGISTER_AND_MOVE, +}; + +enum preempt_type { + PREEMPT, + PREEMPT_AND_ABORT, +}; + +static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, + struct t10_pr_registration *, int, int); + +static int is_reservation_holder( + struct t10_pr_registration *pr_res_holder, + struct t10_pr_registration *pr_reg) +{ + int pr_res_type; + + if (pr_res_holder) { + pr_res_type = pr_res_holder->pr_res_type; + + return pr_res_holder == pr_reg || + pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG || + pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG; + } + return 0; +} + +static sense_reason_t +target_scsi2_reservation_check(struct 
se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + + switch (cmd->t_task_cdb[0]) { + case INQUIRY: + case RELEASE: + case RELEASE_10: + return 0; + default: + break; + } + + if (!dev->reservation_holder || !sess) + return 0; + + if (dev->reservation_holder->se_node_acl != sess->se_node_acl) + return TCM_RESERVATION_CONFLICT; + + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) { + if (dev->dev_res_bin_isid != sess->sess_bin_isid) + return TCM_RESERVATION_CONFLICT; + } + + return 0; +} + +static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *, + struct se_node_acl *, struct se_session *); +static void core_scsi3_put_pr_reg(struct t10_pr_registration *); + +static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd) +{ + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_dev; + struct t10_pr_registration *pr_reg; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + int conflict = 0; + + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, + se_sess); + if (pr_reg) { + /* + * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE + * behavior + * + * A RESERVE(6) or RESERVE(10) command shall complete with GOOD + * status, but no reservation shall be established and the + * persistent reservation shall not be changed, if the command + * is received from a) and b) below. + * + * A RELEASE(6) or RELEASE(10) command shall complete with GOOD + * status, but the persistent reservation shall not be released, + * if the command is received from a) and b) + * + * a) An I_T nexus that is a persistent reservation holder; or + * b) An I_T nexus that is registered if a registrants only or + * all registrants type persistent reservation is present. + * + * In all other cases, a RESERVE(6) command, RESERVE(10) command, + * RELEASE(6) command, or RELEASE(10) command shall be processed + * as defined in SPC-2. + */ + if (pr_reg->pr_res_holder) { + core_scsi3_put_pr_reg(pr_reg); + return 1; + } + if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || + (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) || + (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || + (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { + core_scsi3_put_pr_reg(pr_reg); + return 1; + } + core_scsi3_put_pr_reg(pr_reg); + conflict = 1; + } else { + /* + * Following spc2r20 5.5.1 Reservations overview: + * + * If a logical unit has executed a PERSISTENT RESERVE OUT + * command with the REGISTER or the REGISTER AND IGNORE + * EXISTING KEY service action and is still registered by any + * initiator, all RESERVE commands and all RELEASE commands + * regardless of initiator shall conflict and shall terminate + * with a RESERVATION CONFLICT status. + */ + spin_lock(&pr_tmpl->registration_lock); + conflict = (list_empty(&pr_tmpl->registration_list)) ? 
0 : 1; + spin_unlock(&pr_tmpl->registration_lock); + } + + if (conflict) { + pr_err("Received legacy SPC-2 RESERVE/RELEASE" + " while active SPC-3 registrations exist," + " returning RESERVATION_CONFLICT\n"); + return -EBUSY; + } + + return 0; +} + +void target_release_reservation(struct se_device *dev) +{ + dev->reservation_holder = NULL; + dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS; + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) { + dev->dev_res_bin_isid = 0; + dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID; + } +} + +sense_reason_t +target_scsi2_reservation_release(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + struct se_portal_group *tpg; + int rc; + + if (!sess || !sess->se_tpg) + goto out; + rc = target_check_scsi2_reservation_conflict(cmd); + if (rc == 1) + goto out; + if (rc < 0) + return TCM_RESERVATION_CONFLICT; + + spin_lock(&dev->dev_reservation_lock); + if (!dev->reservation_holder || !sess) + goto out_unlock; + + if (dev->reservation_holder->se_node_acl != sess->se_node_acl) + goto out_unlock; + + if (dev->dev_res_bin_isid != sess->sess_bin_isid) + goto out_unlock; + + target_release_reservation(dev); + tpg = sess->se_tpg; + pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->" + " MAPPED LUN: %llu for %s\n", + tpg->se_tpg_tfo->fabric_name, + cmd->se_lun->unpacked_lun, cmd->orig_fe_lun, + sess->se_node_acl->initiatorname); + +out_unlock: + spin_unlock(&dev->dev_reservation_lock); +out: + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +sense_reason_t +target_scsi2_reservation_reserve(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + struct se_portal_group *tpg; + sense_reason_t ret = 0; + int rc; + + if ((cmd->t_task_cdb[1] & 0x01) && + (cmd->t_task_cdb[1] & 0x02)) { + pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + /* + * This is currently the case for target_core_mod passthrough struct se_cmd + * ops + */ + if (!sess || !sess->se_tpg) + goto out; + rc = target_check_scsi2_reservation_conflict(cmd); + if (rc == 1) + goto out; + + if (rc < 0) + return TCM_RESERVATION_CONFLICT; + + tpg = sess->se_tpg; + spin_lock(&dev->dev_reservation_lock); + if (dev->reservation_holder && + dev->reservation_holder->se_node_acl != sess->se_node_acl) { + pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n", + tpg->se_tpg_tfo->fabric_name); + pr_err("Original reserver LUN: %llu %s\n", + cmd->se_lun->unpacked_lun, + dev->reservation_holder->se_node_acl->initiatorname); + pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu" + " from %s \n", cmd->se_lun->unpacked_lun, + cmd->orig_fe_lun, + sess->se_node_acl->initiatorname); + ret = TCM_RESERVATION_CONFLICT; + goto out_unlock; + } + + dev->reservation_holder = sess; + dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS; + if (sess->sess_bin_isid != 0) { + dev->dev_res_bin_isid = sess->sess_bin_isid; + dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID; + } + pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu" + " for %s\n", tpg->se_tpg_tfo->fabric_name, + cmd->se_lun->unpacked_lun, cmd->orig_fe_lun, + sess->se_node_acl->initiatorname); + +out_unlock: + spin_unlock(&dev->dev_reservation_lock); +out: + if (!ret) + target_complete_cmd(cmd, SAM_STAT_GOOD); + return ret; +} + + +/* + * Begin SPC-3/SPC-4 Persistent Reservations emulation support + * + * This function is called by those 
initiator ports who are *NOT* + * the active PR reservation holder when a reservation is present. + */ +static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, + bool isid_mismatch) +{ + unsigned char *cdb = cmd->t_task_cdb; + struct se_session *se_sess = cmd->se_sess; + struct se_node_acl *nacl = se_sess->se_node_acl; + int other_cdb = 0; + int registered_nexus = 0, ret = 1; /* Conflict by default */ + int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ + int we = 0; /* Write Exclusive */ + int legacy = 0; /* Act like a legacy device and return + * RESERVATION CONFLICT on some CDBs */ + + if (isid_mismatch) { + registered_nexus = 0; + } else { + struct se_dev_entry *se_deve; + + rcu_read_lock(); + se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); + if (se_deve) + registered_nexus = test_bit(DEF_PR_REG_ACTIVE, + &se_deve->deve_flags); + rcu_read_unlock(); + } + + switch (pr_reg_type) { + case PR_TYPE_WRITE_EXCLUSIVE: + we = 1; + fallthrough; + case PR_TYPE_EXCLUSIVE_ACCESS: + /* + * Some commands are only allowed for the persistent reservation + * holder. + */ + break; + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: + we = 1; + fallthrough; + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: + /* + * Some commands are only allowed for registered I_T Nexuses. + */ + reg_only = 1; + break; + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: + we = 1; + fallthrough; + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: + /* + * Each registered I_T Nexus is a reservation holder. + */ + all_reg = 1; + break; + default: + return -EINVAL; + } + /* + * Referenced from spc4r17 table 45 for *NON* PR holder access + */ + switch (cdb[0]) { + case SECURITY_PROTOCOL_IN: + if (registered_nexus) + return 0; + ret = (we) ? 0 : 1; + break; + case MODE_SENSE: + case MODE_SENSE_10: + case READ_ATTRIBUTE: + case READ_BUFFER: + case RECEIVE_DIAGNOSTIC: + if (legacy) { + ret = 1; + break; + } + if (registered_nexus) { + ret = 0; + break; + } + ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ + break; + case PERSISTENT_RESERVE_OUT: + /* + * This follows PERSISTENT_RESERVE_OUT service actions that + * are allowed in the presence of various reservations. + * See spc4r17, table 46 + */ + switch (cdb[1] & 0x1f) { + case PRO_CLEAR: + case PRO_PREEMPT: + case PRO_PREEMPT_AND_ABORT: + ret = (registered_nexus) ? 0 : 1; + break; + case PRO_REGISTER: + case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: + ret = 0; + break; + case PRO_REGISTER_AND_MOVE: + case PRO_RESERVE: + ret = 1; + break; + case PRO_RELEASE: + ret = (registered_nexus) ? 0 : 1; + break; + default: + pr_err("Unknown PERSISTENT_RESERVE_OUT service" + " action: 0x%02x\n", cdb[1] & 0x1f); + return -EINVAL; + } + break; + case RELEASE: + case RELEASE_10: + /* Handled by CRH=1 in target_scsi2_reservation_release() */ + ret = 0; + break; + case RESERVE: + case RESERVE_10: + /* Handled by CRH=1 in target_scsi2_reservation_reserve() */ + ret = 0; + break; + case TEST_UNIT_READY: + ret = (legacy) ? 1 : 0; /* Conflict for legacy */ + break; + case MAINTENANCE_IN: + switch (cdb[1] & 0x1f) { + case MI_MANAGEMENT_PROTOCOL_IN: + if (registered_nexus) { + ret = 0; + break; + } + ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ + break; + case MI_REPORT_SUPPORTED_OPERATION_CODES: + case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: + if (legacy) { + ret = 1; + break; + } + if (registered_nexus) { + ret = 0; + break; + } + ret = (we) ? 
0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_ALIASES:
+		case MI_REPORT_IDENTIFYING_INFORMATION:
+		case MI_REPORT_PRIORITY:
+		case MI_REPORT_TARGET_PGS:
+		case MI_REPORT_TIMESTAMP:
+			ret = 0; /* Allowed */
+			break;
+		default:
+			pr_err("Unknown MI Service Action: 0x%02x\n",
+				(cdb[1] & 0x1f));
+			return -EINVAL;
+		}
+		break;
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case INQUIRY:
+	case LOG_SENSE:
+	case SERVICE_ACTION_IN_12:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+		ret = 0; /* Allowed CDBs */
+		break;
+	default:
+		other_cdb = 1;
+		break;
+	}
+	/*
+	 * Case where the CDB is explicitly allowed in the above switch
+	 * statement.
+	 */
+	if (!ret && !other_cdb) {
+		pr_debug("Allowing explicit CDB: 0x%02x for %s"
+			" reservation holder\n", cdb[0],
+			core_scsi3_pr_dump_type(pr_reg_type));
+
+		return ret;
+	}
+	/*
+	 * Check for initiator ports that are *NOT* holding the
+	 * WRITE_EXCLUSIVE_* reservation.
+	 */
+	if (we && !registered_nexus) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			/*
+			 * Conflict for write exclusive
+			 */
+			pr_debug("%s Conflict for unregistered nexus"
+				" %s CDB: 0x%02x to %s reservation\n",
+				transport_dump_cmd_direction(cmd),
+				se_sess->se_node_acl->initiatorname, cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+			return 1;
+		} else {
+			/*
+			 * Allow non WRITE CDBs for all Write Exclusive
+			 * PR TYPEs to pass for registered and
+			 * non-registered nexuses NOT holding the reservation.
+			 *
+			 * We only make noise for the unregistered nexuses,
+			 * as we expect registered non-reservation holding
+			 * nexuses to issue CDBs.
+			 */
+
+			if (!registered_nexus) {
+				pr_debug("Allowing implicit CDB: 0x%02x"
+					" for %s reservation on unregistered"
+					" nexus\n", cdb[0],
+					core_scsi3_pr_dump_type(pr_reg_type));
+			}
+
+			return 0;
+		}
+	} else if ((reg_only) || (all_reg)) {
+		if (registered_nexus) {
+			/*
+			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+			 * allow commands from registered nexuses.
+			 */
+
+			pr_debug("Allowing implicit CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+
+			return 0;
+		}
+	} else if (we && registered_nexus) {
+		/*
+		 * Reads are allowed for Write Exclusive locks
+		 * from all registrants.
+		 */
+		if (cmd->data_direction == DMA_FROM_DEVICE) {
+			pr_debug("Allowing READ CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+
+			return 0;
+		}
+	}
+	pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%02x"
+		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+		(registered_nexus) ?
"" : "un", + se_sess->se_node_acl->initiatorname, cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); + + return 1; /* Conflict by default */ +} + +static sense_reason_t +target_scsi3_pr_reservation_check(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + u32 pr_reg_type; + bool isid_mismatch = false; + + if (!dev->dev_pr_res_holder) + return 0; + + pr_reg_type = dev->dev_pr_res_holder->pr_res_type; + cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; + if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) + goto check_nonholder; + + if (dev->dev_pr_res_holder->isid_present_at_reg) { + if (dev->dev_pr_res_holder->pr_reg_bin_isid != + sess->sess_bin_isid) { + isid_mismatch = true; + goto check_nonholder; + } + } + + return 0; + +check_nonholder: + if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, isid_mismatch)) + return TCM_RESERVATION_CONFLICT; + return 0; +} + +static u32 core_scsi3_pr_generation(struct se_device *dev) +{ + u32 prg; + + /* + * PRGeneration field shall contain the value of a 32-bit wrapping + * counter mainted by the device server. + * + * Note that this is done regardless of Active Persist across + * Target PowerLoss (APTPL) + * + * See spc4r17 section 6.3.12 READ_KEYS service action + */ + spin_lock(&dev->dev_reservation_lock); + prg = dev->t10_pr.pr_generation++; + spin_unlock(&dev->dev_reservation_lock); + + return prg; +} + +static struct t10_pr_registration *__core_scsi3_do_alloc_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_lun *lun, + struct se_dev_entry *dest_deve, + u64 mapped_lun, + unsigned char *isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl) +{ + struct t10_pr_registration *pr_reg; + + pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); + if (!pr_reg) { + pr_err("Unable to allocate struct t10_pr_registration\n"); + return NULL; + } + + INIT_LIST_HEAD(&pr_reg->pr_reg_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); + atomic_set(&pr_reg->pr_res_holders, 0); + pr_reg->pr_reg_nacl = nacl; + /* + * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1, + * the se_dev_entry->pr_ref will have been already obtained by + * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration(). + * + * Otherwise, locate se_dev_entry now and obtain a reference until + * registration completes in __core_scsi3_add_registration(). + */ + if (dest_deve) { + pr_reg->pr_reg_deve = dest_deve; + } else { + rcu_read_lock(); + pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun); + if (!pr_reg->pr_reg_deve) { + rcu_read_unlock(); + pr_err("Unable to locate PR deve %s mapped_lun: %llu\n", + nacl->initiatorname, mapped_lun); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + return NULL; + } + kref_get(&pr_reg->pr_reg_deve->pr_kref); + rcu_read_unlock(); + } + pr_reg->pr_res_mapped_lun = mapped_lun; + pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; + pr_reg->tg_pt_sep_rtpi = lun->lun_tpg->tpg_rtpi; + pr_reg->pr_res_key = sa_res_key; + pr_reg->pr_reg_all_tg_pt = all_tg_pt; + pr_reg->pr_reg_aptpl = aptpl; + /* + * If an ISID value for this SCSI Initiator Port exists, + * save it to the registration now. 
+ */ + if (isid != NULL) { + pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); + snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); + pr_reg->isid_present_at_reg = 1; + } + + return pr_reg; +} + +static int core_scsi3_lunacl_depend_item(struct se_dev_entry *); +static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *); + +/* + * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0 + * modes. + */ +static struct t10_pr_registration *__core_scsi3_alloc_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_lun *lun, + struct se_dev_entry *deve, + u64 mapped_lun, + unsigned char *isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl) +{ + struct se_dev_entry *deve_tmp; + struct se_node_acl *nacl_tmp; + struct se_lun_acl *lacl_tmp; + struct se_lun *lun_tmp, *next, *dest_lun; + const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; + struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; + int ret; + /* + * Create a registration for the I_T Nexus upon which the + * PROUT REGISTER was received. + */ + pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun, + isid, sa_res_key, all_tg_pt, + aptpl); + if (!pr_reg) + return NULL; + /* + * Return pointer to pr_reg for ALL_TG_PT=0 + */ + if (!all_tg_pt) + return pr_reg; + /* + * Create list of matching SCSI Initiator Port registrations + * for ALL_TG_PT=1 + */ + spin_lock(&dev->se_port_lock); + list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) { + if (!percpu_ref_tryget_live(&lun_tmp->lun_ref)) + continue; + spin_unlock(&dev->se_port_lock); + + spin_lock(&lun_tmp->lun_deve_lock); + list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) { + /* + * This pointer will be NULL for demo mode MappedLUNs + * that have not been make explicit via a ConfigFS + * MappedLUN group for the SCSI Initiator Node ACL. + */ + if (!deve_tmp->se_lun_acl) + continue; + + lacl_tmp = deve_tmp->se_lun_acl; + nacl_tmp = lacl_tmp->se_lun_nacl; + /* + * Skip the matching struct se_node_acl that is allocated + * above.. + */ + if (nacl == nacl_tmp) + continue; + /* + * Only perform PR registrations for target ports on + * the same fabric module as the REGISTER w/ ALL_TG_PT=1 + * arrived. 
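+			 *
+			 * The match is by fabric ops pointer plus ASCII
+			 * initiator name below; no ISID is carried over to
+			 * the other ports.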
+ */ + if (tfo != nacl_tmp->se_tpg->se_tpg_tfo) + continue; + /* + * Look for a matching Initiator Node ACL in ASCII format + */ + if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) + continue; + + kref_get(&deve_tmp->pr_kref); + spin_unlock(&lun_tmp->lun_deve_lock); + /* + * Grab a configfs group dependency that is released + * for the exception path at label out: below, or upon + * completion of adding ALL_TG_PT=1 registrations in + * __core_scsi3_add_registration() + */ + ret = core_scsi3_lunacl_depend_item(deve_tmp); + if (ret < 0) { + pr_err("core_scsi3_lunacl_depend" + "_item() failed\n"); + percpu_ref_put(&lun_tmp->lun_ref); + kref_put(&deve_tmp->pr_kref, target_pr_kref_release); + goto out; + } + /* + * Located a matching SCSI Initiator Port on a different + * port, allocate the pr_reg_atp and attach it to the + * pr_reg->pr_reg_atp_list that will be processed once + * the original *pr_reg is processed in + * __core_scsi3_add_registration() + */ + dest_lun = deve_tmp->se_lun; + + pr_reg_atp = __core_scsi3_do_alloc_registration(dev, + nacl_tmp, dest_lun, deve_tmp, + deve_tmp->mapped_lun, NULL, + sa_res_key, all_tg_pt, aptpl); + if (!pr_reg_atp) { + percpu_ref_put(&lun_tmp->lun_ref); + core_scsi3_lunacl_undepend_item(deve_tmp); + goto out; + } + + list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, + &pr_reg->pr_reg_atp_list); + spin_lock(&lun_tmp->lun_deve_lock); + } + spin_unlock(&lun_tmp->lun_deve_lock); + + spin_lock(&dev->se_port_lock); + percpu_ref_put(&lun_tmp->lun_ref); + } + spin_unlock(&dev->se_port_lock); + + return pr_reg; +out: + list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, + &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { + list_del(&pr_reg_tmp->pr_reg_atp_mem_list); + core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); + kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); + } + kmem_cache_free(t10_pr_reg_cache, pr_reg); + return NULL; +} + +int core_scsi3_alloc_aptpl_registration( + struct t10_reservation *pr_tmpl, + u64 sa_res_key, + unsigned char *i_port, + unsigned char *isid, + u64 mapped_lun, + unsigned char *t_port, + u16 tpgt, + u64 target_lun, + int res_holder, + int all_tg_pt, + u8 type) +{ + struct t10_pr_registration *pr_reg; + + if (!i_port || !t_port || !sa_res_key) { + pr_err("Illegal parameters for APTPL registration\n"); + return -EINVAL; + } + + pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); + if (!pr_reg) { + pr_err("Unable to allocate struct t10_pr_registration\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&pr_reg->pr_reg_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); + INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); + atomic_set(&pr_reg->pr_res_holders, 0); + pr_reg->pr_reg_nacl = NULL; + pr_reg->pr_reg_deve = NULL; + pr_reg->pr_res_mapped_lun = mapped_lun; + pr_reg->pr_aptpl_target_lun = target_lun; + pr_reg->pr_res_key = sa_res_key; + pr_reg->pr_reg_all_tg_pt = all_tg_pt; + pr_reg->pr_reg_aptpl = 1; + pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */ + pr_reg->pr_res_type = type; + /* + * If an ISID value had been saved in APTPL metadata for this + * SCSI Initiator Port, restore it now. + */ + if (isid != NULL) { + pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); + snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); + pr_reg->isid_present_at_reg = 1; + } + /* + * Copy the i_port and t_port information from caller. 
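+	 *
+	 * These strings are what __core_scsi3_check_aptpl_registration()
+	 * later matches a restored I_T nexus against.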
+ */ + snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port); + snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port); + pr_reg->pr_reg_tpgt = tpgt; + /* + * Set pr_res_holder from caller, the pr_reg who is the reservation + * holder will get it's pointer set in core_scsi3_aptpl_reserve() once + * the Initiator Node LUN ACL from the fabric module is created for + * this registration. + */ + pr_reg->pr_res_holder = res_holder; + + list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); + pr_debug("SPC-3 PR APTPL Successfully added registration%s from" + " metadata\n", (res_holder) ? "+reservation" : ""); + return 0; +} + +static void core_scsi3_aptpl_reserve( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_node_acl *node_acl, + struct t10_pr_registration *pr_reg) +{ + char i_buf[PR_REG_ISID_ID_LEN] = { }; + + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); + + spin_lock(&dev->dev_reservation_lock); + dev->dev_pr_res_holder = pr_reg; + spin_unlock(&dev->dev_reservation_lock); + + pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" + " new reservation holder TYPE: %s ALL_TG_PT: %d\n", + tpg->se_tpg_tfo->fabric_name, + core_scsi3_pr_dump_type(pr_reg->pr_res_type), + (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", + tpg->se_tpg_tfo->fabric_name, node_acl->initiatorname, + i_buf); +} + +static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *, + struct t10_pr_registration *, enum register_type, int); + +static int __core_scsi3_check_aptpl_registration( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun, + u64 target_lun, + struct se_node_acl *nacl, + u64 mapped_lun) +{ + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + unsigned char i_port[PR_APTPL_MAX_IPORT_LEN] = { }; + unsigned char t_port[PR_APTPL_MAX_TPORT_LEN] = { }; + u16 tpgt; + + /* + * Copy Initiator Port information from struct se_node_acl + */ + snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); + snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); + /* + * Look for the matching registrations+reservation from those + * created from APTPL metadata. Note that multiple registrations + * may exist for fabrics that use ISIDs in their SCSI Initiator Port + * TransportIDs. + */ + spin_lock(&pr_tmpl->aptpl_reg_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, + pr_reg_aptpl_list) { + + if (!strcmp(pr_reg->pr_iport, i_port) && + (pr_reg->pr_res_mapped_lun == mapped_lun) && + !(strcmp(pr_reg->pr_tport, t_port)) && + (pr_reg->pr_reg_tpgt == tpgt) && + (pr_reg->pr_aptpl_target_lun == target_lun)) { + /* + * Obtain the ->pr_reg_deve pointer + reference, that + * is released by __core_scsi3_add_registration() below. + */ + rcu_read_lock(); + pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun); + if (!pr_reg->pr_reg_deve) { + pr_err("Unable to locate PR APTPL %s mapped_lun:" + " %llu\n", nacl->initiatorname, mapped_lun); + rcu_read_unlock(); + continue; + } + kref_get(&pr_reg->pr_reg_deve->pr_kref); + rcu_read_unlock(); + + pr_reg->pr_reg_nacl = nacl; + pr_reg->tg_pt_sep_rtpi = lun->lun_tpg->tpg_rtpi; + list_del(&pr_reg->pr_reg_aptpl_list); + spin_unlock(&pr_tmpl->aptpl_reg_lock); + /* + * At this point all of the pointers in *pr_reg will + * be setup, so go ahead and add the registration. 
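+			 *
+			 * An APTPL restore is handled as a plain REGISTER:
+			 * register_type 0 and register_move 0 in the call
+			 * below.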
+ */ + __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); + /* + * If this registration is the reservation holder, + * make that happen now.. + */ + if (pr_reg->pr_res_holder) + core_scsi3_aptpl_reserve(dev, tpg, + nacl, pr_reg); + /* + * Reenable pr_aptpl_active to accept new metadata + * updates once the SCSI device is active again.. + */ + spin_lock(&pr_tmpl->aptpl_reg_lock); + pr_tmpl->pr_aptpl_active = 1; + } + } + spin_unlock(&pr_tmpl->aptpl_reg_lock); + + return 0; +} + +int core_scsi3_check_aptpl_registration( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun, + struct se_node_acl *nacl, + u64 mapped_lun) +{ + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) + return 0; + + return __core_scsi3_check_aptpl_registration(dev, tpg, lun, + lun->unpacked_lun, nacl, + mapped_lun); +} + +static void __core_scsi3_dump_registration( + const struct target_core_fabric_ops *tfo, + struct se_device *dev, + struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, + enum register_type register_type) +{ + struct se_portal_group *se_tpg = nacl->se_tpg; + char i_buf[PR_REG_ISID_ID_LEN] = { }; + + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); + + pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" + " Node: %s%s\n", tfo->fabric_name, (register_type == REGISTER_AND_MOVE) ? + "_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? + "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, + i_buf); + pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", + tfo->fabric_name, tfo->tpg_get_wwn(se_tpg), + tfo->tpg_get_tag(se_tpg)); + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + " Port(s)\n", tfo->fabric_name, + (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", + dev->transport->name); + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + " 0x%08x APTPL: %d\n", tfo->fabric_name, + pr_reg->pr_res_key, pr_reg->pr_res_generation, + pr_reg->pr_reg_aptpl); +} + +static void __core_scsi3_add_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct t10_pr_registration *pr_reg, + enum register_type register_type, + int register_move) +{ + const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; + struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + struct se_dev_entry *deve; + + /* + * Increment PRgeneration counter for struct se_device upon a successful + * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action + * + * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service + * action, the struct se_device->dev_reservation_lock will already be held, + * so we do not call core_scsi3_pr_generation() which grabs the lock + * for the REGISTER. + */ + pr_reg->pr_res_generation = (register_move) ? + dev->t10_pr.pr_generation++ : + core_scsi3_pr_generation(dev); + + spin_lock(&pr_tmpl->registration_lock); + list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list); + + __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); + spin_unlock(&pr_tmpl->registration_lock); + /* + * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 
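The add-registration path above stamps each pr_reg with PRgeneration, a 32-bit per-device counter bumped on every successful REGISTER-class service action (spc4r17 6.3.2). A toy model of the contract, with locking omitted:

#include <stdint.h>

static uint32_t pr_generation;	/* per-device in the real code */

/* Callers record the pre-increment value in the new registration;
 * wrap-around at 2^32 is intentional. */
static uint32_t pr_generation_bump(void)
{
	return pr_generation++;
}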
+ */ + if (!pr_reg->pr_reg_all_tg_pt || register_move) + goto out; + /* + * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 + * allocated in __core_scsi3_alloc_registration() + */ + list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, + &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { + struct se_node_acl *nacl_tmp = pr_reg_tmp->pr_reg_nacl; + + list_del(&pr_reg_tmp->pr_reg_atp_mem_list); + + pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); + + spin_lock(&pr_tmpl->registration_lock); + list_add_tail(&pr_reg_tmp->pr_reg_list, + &pr_tmpl->registration_list); + + __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, + register_type); + spin_unlock(&pr_tmpl->registration_lock); + /* + * Drop configfs group dependency reference and deve->pr_kref + * obtained from __core_scsi3_alloc_registration() code. + */ + rcu_read_lock(); + deve = pr_reg_tmp->pr_reg_deve; + if (deve) { + set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); + core_scsi3_lunacl_undepend_item(deve); + pr_reg_tmp->pr_reg_deve = NULL; + } + rcu_read_unlock(); + } +out: + /* + * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration() + */ + rcu_read_lock(); + deve = pr_reg->pr_reg_deve; + if (deve) { + set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); + kref_put(&deve->pr_kref, target_pr_kref_release); + pr_reg->pr_reg_deve = NULL; + } + rcu_read_unlock(); +} + +static int core_scsi3_alloc_registration( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_lun *lun, + struct se_dev_entry *deve, + u64 mapped_lun, + unsigned char *isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl, + enum register_type register_type, + int register_move) +{ + struct t10_pr_registration *pr_reg; + + pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun, + isid, sa_res_key, all_tg_pt, + aptpl); + if (!pr_reg) + return -EPERM; + + __core_scsi3_add_registration(dev, nacl, pr_reg, + register_type, register_move); + return 0; +} + +static struct t10_pr_registration *__core_scsi3_locate_pr_reg( + struct se_device *dev, + struct se_node_acl *nacl, + unsigned char *isid) +{ + struct t10_reservation *pr_tmpl = &dev->t10_pr; + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + /* + * First look for a matching struct se_node_acl + */ + if (pr_reg->pr_reg_nacl != nacl) + continue; + + /* + * If this registration does NOT contain a fabric provided + * ISID, then we have found a match. + */ + if (!pr_reg->isid_present_at_reg) { + atomic_inc_mb(&pr_reg->pr_res_holders); + spin_unlock(&pr_tmpl->registration_lock); + return pr_reg; + } + /* + * If the *pr_reg contains a fabric defined ISID for multi-value + * SCSI Initiator Port TransportIDs, then we expect a valid + * matching ISID to be provided by the local SCSI Initiator Port. 
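The lookup described above, whose body continues in the next hunk, matches on the node ACL first and consults the ISID only when the registration carries one. A hypothetical condensation:

#include <stdbool.h>
#include <string.h>

struct pr_reg_model {
	const void *nacl;
	bool has_isid;
	char isid[17];	/* PR_REG_ISID_LEN-sized in the real code */
};

/* A registration without a fabric ISID matches on the ACL alone;
 * otherwise the caller must present the identical ISID. */
static bool reg_matches(const struct pr_reg_model *r,
			const void *nacl, const char *isid)
{
	if (r->nacl != nacl)
		return false;
	if (!r->has_isid)
		return true;
	return isid && !strcmp(isid, r->isid);
}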
+ */ + if (!isid) + continue; + if (strcmp(isid, pr_reg->pr_reg_isid)) + continue; + + atomic_inc_mb(&pr_reg->pr_res_holders); + spin_unlock(&pr_tmpl->registration_lock); + return pr_reg; + } + spin_unlock(&pr_tmpl->registration_lock); + + return NULL; +} + +static struct t10_pr_registration *core_scsi3_locate_pr_reg( + struct se_device *dev, + struct se_node_acl *nacl, + struct se_session *sess) +{ + struct se_portal_group *tpg = nacl->se_tpg; + unsigned char buf[PR_REG_ISID_LEN] = { }; + unsigned char *isid_ptr = NULL; + + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { + tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], + PR_REG_ISID_LEN); + isid_ptr = &buf[0]; + } + + return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr); +} + +static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) +{ + atomic_dec_mb(&pr_reg->pr_res_holders); +} + +static int core_scsi3_check_implicit_release( + struct se_device *dev, + struct t10_pr_registration *pr_reg) +{ + struct se_node_acl *nacl = pr_reg->pr_reg_nacl; + struct t10_pr_registration *pr_res_holder; + int ret = 0; + + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (!pr_res_holder) { + spin_unlock(&dev->dev_reservation_lock); + return ret; + } + if (pr_res_holder == pr_reg) { + /* + * Perform an implicit RELEASE if the registration that + * is being released is holding the reservation. + * + * From spc4r17, section 5.7.11.1: + * + * e) If the I_T nexus is the persistent reservation holder + * and the persistent reservation is not an all registrants + * type, then a PERSISTENT RESERVE OUT command with REGISTER + * service action or REGISTER AND IGNORE EXISTING KEY + * service action with the SERVICE ACTION RESERVATION KEY + * field set to zero (see 5.7.11.3). + */ + __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1); + ret = 1; + /* + * For 'All Registrants' reservation types, all existing + * registrations are still processed as reservation holders + * in core_scsi3_pr_seq_non_holder() after the initial + * reservation holder is implicitly released here. + */ + } else if (pr_reg->pr_reg_all_tg_pt && + (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, + pr_reg->pr_reg_nacl->initiatorname)) && + (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { + pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1" + " UNREGISTER while existing reservation with matching" + " key 0x%016Lx is present from another SCSI Initiator" + " Port\n", pr_reg->pr_res_key); + ret = -EPERM; + } + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} + +static void __core_scsi3_free_registration( + struct se_device *dev, + struct t10_pr_registration *pr_reg, + struct list_head *preempt_and_abort_list, + int dec_holders) + __releases(&pr_tmpl->registration_lock) + __acquires(&pr_tmpl->registration_lock) +{ + const struct target_core_fabric_ops *tfo = + pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + struct se_node_acl *nacl = pr_reg->pr_reg_nacl; + struct se_dev_entry *deve; + char i_buf[PR_REG_ISID_ID_LEN] = { }; + + lockdep_assert_held(&pr_tmpl->registration_lock); + + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); + + if (!list_empty(&pr_reg->pr_reg_list)) + list_del(&pr_reg->pr_reg_list); + /* + * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), + * so call core_scsi3_put_pr_reg() to decrement our reference. 
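core_scsi3_check_implicit_release() above boils down to a three-way decision. A simplified model with hypothetical types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct reg_model { const char *initiator; uint64_t key; bool all_tg_pt; };

/* 1: holder unregisters, perform implicit RELEASE; 0: nothing to do;
 * -1: ALL_TG_PT unregister sharing the holder's key and initiator
 * name from another port, which must be rejected. */
static int check_implicit_release(const struct reg_model *holder,
				  const struct reg_model *r)
{
	if (!holder)
		return 0;
	if (holder == r)
		return 1;
	if (r->all_tg_pt && holder->key == r->key &&
	    !strcmp(holder->initiator, r->initiator))
		return -1;
	return 0;
}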
+ */
+	if (dec_holders)
+		core_scsi3_put_pr_reg(pr_reg);
+
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Wait until all references from any other I_T nexuses for this
+	 * *pr_reg have been released. Because list_del() is called above,
+	 * the last core_scsi3_put_pr_reg(pr_reg) will drop this reference
+	 * count back to zero, after which we release *pr_reg.
+	 */
+	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+		pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
+				tfo->fabric_name);
+		cpu_relax();
+	}
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun);
+	if (deve)
+		clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+	rcu_read_unlock();
+
+	spin_lock(&pr_tmpl->registration_lock);
+	pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+		" Node: %s%s\n", tfo->fabric_name,
+		pr_reg->pr_reg_nacl->initiatorname,
+		i_buf);
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n", tfo->fabric_name,
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		dev->transport->name);
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x\n", tfo->fabric_name, pr_reg->pr_res_key,
+		pr_reg->pr_res_generation);
+
+	if (!preempt_and_abort_list) {
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return;
+	}
+	/*
+	 * For PREEMPT_AND_ABORT, the *pr_reg entries on preempt_and_abort_list
+	 * are released once the ABORT_TASK_SET has completed.
+	 */
+	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+	struct se_device *dev,
+	struct se_node_acl *nacl)
+{
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+	bool free_reg = false;
+	/*
+	 * If the passed se_node_acl matches the reservation holder,
+	 * release the reservation.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder != NULL) &&
+	    (pr_res_holder->pr_reg_nacl == nacl)) {
+		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
+		free_reg = true;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Release any registration associated with the struct se_node_acl.
+ */ + spin_lock(&pr_tmpl->registration_lock); + if (pr_res_holder && free_reg) + __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0); + + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + if (pr_reg->pr_reg_nacl != nacl) + continue; + + __core_scsi3_free_registration(dev, pr_reg, NULL, 0); + } + spin_unlock(&pr_tmpl->registration_lock); +} + +void core_scsi3_free_all_registrations( + struct se_device *dev) +{ + struct t10_reservation *pr_tmpl = &dev->t10_pr; + struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; + + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (pr_res_holder != NULL) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + __core_scsi3_complete_pro_release(dev, pr_res_nacl, + pr_res_holder, 0, 0); + } + spin_unlock(&dev->dev_reservation_lock); + + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + __core_scsi3_free_registration(dev, pr_reg, NULL, 0); + } + spin_unlock(&pr_tmpl->registration_lock); + + spin_lock(&pr_tmpl->aptpl_reg_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, + pr_reg_aptpl_list) { + list_del(&pr_reg->pr_reg_aptpl_list); + kmem_cache_free(t10_pr_reg_cache, pr_reg); + } + spin_unlock(&pr_tmpl->aptpl_reg_lock); +} + +static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) +{ + return target_depend_item(&tpg->tpg_group.cg_item); +} + +static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) +{ + target_undepend_item(&tpg->tpg_group.cg_item); + atomic_dec_mb(&tpg->tpg_pr_ref_count); +} + +static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) +{ + if (nacl->dynamic_node_acl) + return 0; + return target_depend_item(&nacl->acl_group.cg_item); +} + +static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) +{ + if (!nacl->dynamic_node_acl) + target_undepend_item(&nacl->acl_group.cg_item); + atomic_dec_mb(&nacl->acl_pr_ref_count); +} + +static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) +{ + /* + * For nacl->dynamic_node_acl=1 + */ + if (!se_deve->se_lun_acl) + return 0; + + return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item); +} + +static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) +{ + /* + * For nacl->dynamic_node_acl=1 + */ + if (!se_deve->se_lun_acl) { + kref_put(&se_deve->pr_kref, target_pr_kref_release); + return; + } + + target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item); + kref_put(&se_deve->pr_kref, target_pr_kref_release); +} + +static sense_reason_t +core_scsi3_decode_spec_i_port( + struct se_cmd *cmd, + struct se_portal_group *tpg, + unsigned char *l_isid, + u64 sa_res_key, + int all_tg_pt, + int aptpl) +{ + struct se_device *dev = cmd->se_dev; + struct se_portal_group *dest_tpg = NULL, *tmp_tpg; + struct se_session *se_sess = cmd->se_sess; + struct se_node_acl *dest_node_acl = NULL; + struct se_dev_entry *dest_se_deve = NULL; + struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; + struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; + LIST_HEAD(tid_dest_list); + struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; + unsigned char *buf, *ptr, proto_ident; + const unsigned char *i_str = NULL; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; + sense_reason_t ret; + u32 tpdl, tid_len = 0; + u32 dest_rtpi = 0; + + /* + * Allocate a struct pr_transport_id_holder and 
set up the
+ * local_node_acl pointer and add to struct list_head tid_dest_list
+ * for add registration processing in the loop of tid_dest_list below.
+ */
+	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+	if (!tidh_new) {
+		pr_err("Unable to allocate tidh_new\n");
+		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+	}
+	INIT_LIST_HEAD(&tidh_new->dest_list);
+	tidh_new->dest_tpg = tpg;
+	tidh_new->dest_node_acl = se_sess->se_node_acl;
+
+	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
+				se_sess->se_node_acl, cmd->se_lun,
+				NULL, cmd->orig_fe_lun, l_isid,
+				sa_res_key, all_tg_pt, aptpl);
+	if (!local_pr_reg) {
+		kfree(tidh_new);
+		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+	}
+
+	if (core_scsi3_lunacl_depend_item(local_pr_reg->pr_reg_deve)) {
+		kfree(tidh_new);
+		kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+			 target_pr_kref_release);
+		kmem_cache_free(t10_pr_reg_cache, local_pr_reg);
+		return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+	}
+
+	tidh_new->dest_pr_reg = local_pr_reg;
+	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+	if (cmd->data_length < 28) {
+		pr_warn("SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf) {
+		ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+		goto out;
+	}
+
+	/*
+	 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+	 * first extract TransportID Parameter Data Length, and make sure
+	 * the value matches up to the SCSI expected data transfer length.
+	 */
+	tpdl = get_unaligned_be32(&buf[24]);
+
+	if ((tpdl + 28) != cmd->data_length) {
+		pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+			" does not equal CDB data_length: %u\n", tpdl,
+			cmd->data_length);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_unmap;
+	}
+	/*
+	 * Start processing the received transport IDs using the
+	 * receiving I_T Nexus portal's fabric dependent methods to
+	 * obtain the SCSI Initiator Port/Device Identifiers.
+	 */
+	ptr = &buf[28];
+
+	while (tpdl > 0) {
+		struct se_lun *dest_lun, *tmp_lun;
+
+		proto_ident = (ptr[0] & 0x0f);
+		dest_tpg = NULL;
+
+		spin_lock(&dev->se_port_lock);
+		list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+			tmp_tpg = tmp_lun->lun_tpg;
+
+			/*
+			 * Look for the matching proto_ident provided by
+			 * the received TransportID
+			 */
+			if (tmp_tpg->proto_id != proto_ident)
+				continue;
+			dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi;
+
+			iport_ptr = NULL;
+			i_str = target_parse_pr_out_transport_id(tmp_tpg,
+					ptr, &tid_len, &iport_ptr);
+			if (!i_str)
+				continue;
+			/*
+			 * Determine if this SCSI device server requires that
+			 * SCSI Initiator TransportID w/ ISIDs is enforced
+			 * for fabric modules (iSCSI) requiring them.
+			 */
+			if (tpg->se_tpg_tfo->sess_get_initiator_sid &&
+			    dev->dev_attrib.enforce_pr_isids &&
+			    !iport_ptr) {
+				pr_warn("SPC-PR: enforce_pr_isids is set but an ISID has not been sent in the SPEC_I_PT data for %s.",
+					i_str);
+				ret = TCM_INVALID_PARAMETER_LIST;
+				spin_unlock(&dev->se_port_lock);
+				goto out_unmap;
+			}
+
+			atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
+			spin_unlock(&dev->se_port_lock);
+
+			if (core_scsi3_tpg_depend_item(tmp_tpg)) {
+				pr_err(" core_scsi3_tpg_depend_item()"
+					" for tmp_tpg\n");
+				atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
+				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				goto out_unmap;
+			}
+			/*
+			 * Locate the destination initiator ACL to be registered
+			 * from the decoded fabric module specific TransportID
+			 * at *i_str.
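The two framing checks above pin down the SPEC_I_PT payload layout: a 28-byte parameter-list header whose last four bytes are the big-endian TransportID Parameter Data Length (tpdl), followed by exactly tpdl bytes of TransportIDs. The same validation as a userspace sketch:

#include <stdint.h>

/* Returns nonzero when the PROUT SPEC_I_PT payload is self-consistent. */
static int spec_i_pt_frame_ok(const uint8_t *buf, uint32_t data_length)
{
	uint32_t tpdl;

	if (data_length < 28)
		return 0;
	tpdl = (uint32_t)buf[24] << 24 | (uint32_t)buf[25] << 16 |
	       (uint32_t)buf[26] << 8 | buf[27];
	return tpdl + 28 == data_length;	/* must cover the payload exactly */
}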
+ */
+			mutex_lock(&tmp_tpg->acl_node_mutex);
+			dest_node_acl = __core_tpg_get_initiator_node_acl(
+						tmp_tpg, i_str);
+			if (dest_node_acl)
+				atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
+			mutex_unlock(&tmp_tpg->acl_node_mutex);
+
+			if (!dest_node_acl) {
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				spin_lock(&dev->se_port_lock);
+				continue;
+			}
+
+			if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
+				pr_err("configfs_depend_item() failed"
+					" for dest_node_acl->acl_group\n");
+				atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				goto out_unmap;
+			}
+
+			dest_tpg = tmp_tpg;
+			pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s Port RTPI: %u\n",
+				 dest_tpg->se_tpg_tfo->fabric_name,
+				 dest_node_acl->initiatorname, dest_rtpi);
+
+			spin_lock(&dev->se_port_lock);
+			break;
+		}
+		spin_unlock(&dev->se_port_lock);
+
+		if (!dest_tpg) {
+			pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
+					" dest_tpg\n");
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+
+		pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+			" tid_len: %d for %s + %s\n",
+			dest_tpg->se_tpg_tfo->fabric_name, cmd->data_length,
+			tpdl, tid_len, i_str, iport_ptr);
+
+		if (tid_len > tpdl) {
+			pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+				" %u for Transport ID: %s\n", tid_len, ptr);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+		/*
+		 * Locate the destination struct se_dev_entry pointer for matching
+		 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+		 * Target Port.
+		 */
+		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+					dest_rtpi);
+		if (!dest_se_deve) {
+			pr_err("Unable to locate %s dest_se_deve from destination RTPI: %u\n",
+				dest_tpg->se_tpg_tfo->fabric_name,
+				dest_rtpi);
+
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto out_unmap;
+		}
+
+		if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
+			pr_err("core_scsi3_lunacl_depend_item()"
+					" failed\n");
+			kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto out_unmap;
+		}
+
+		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+			" dest_se_deve mapped_lun: %llu\n",
+			dest_tpg->se_tpg_tfo->fabric_name,
+			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+
+		/*
+		 * Skip any TransportIDs that already have a registration for
+		 * this target port.
+		 */
+		pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+		if (pr_reg_e) {
+			core_scsi3_put_pr_reg(pr_reg_e);
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ptr += tid_len;
+			tpdl -= tid_len;
+			tid_len = 0;
+			continue;
+		}
+		/*
+		 * Allocate a struct pr_transport_id_holder and set up
+		 * the dest_node_acl and dest_se_deve pointers for the
+		 * loop below.
+ */
+		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+				GFP_KERNEL);
+		if (!tidh_new) {
+			pr_err("Unable to allocate tidh_new\n");
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			goto out_unmap;
+		}
+		INIT_LIST_HEAD(&tidh_new->dest_list);
+		tidh_new->dest_tpg = dest_tpg;
+		tidh_new->dest_node_acl = dest_node_acl;
+		tidh_new->dest_se_deve = dest_se_deve;
+
+		/*
+		 * Allocate, but do NOT add the registration for the
+		 * TransportID referenced SCSI Initiator port.  This is
+		 * done because of the following from spc4r17 in section
+		 * 6.14.3 wrt SPEC_I_PT:
+		 *
+		 * "If a registration fails for any initiator port (e.g., if the
+		 * logical unit does not have enough resources available to
+		 * hold the registration information), no registrations shall be
+		 * made, and the command shall be terminated with
+		 * CHECK CONDITION status."
+		 *
+		 * That means we call __core_scsi3_alloc_registration() here,
+		 * and then call __core_scsi3_add_registration() in the
+		 * 2nd loop which will never fail.
+		 */
+		dest_lun = dest_se_deve->se_lun;
+
+		dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
+					dest_node_acl, dest_lun, dest_se_deve,
+					dest_se_deve->mapped_lun, iport_ptr,
+					sa_res_key, all_tg_pt, aptpl);
+		if (!dest_pr_reg) {
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			kfree(tidh_new);
+			ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+			goto out_unmap;
+		}
+		tidh_new->dest_pr_reg = dest_pr_reg;
+		list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+		ptr += tid_len;
+		tpdl -= tid_len;
+		tid_len = 0;
+
+	}
+
+	transport_kunmap_data_sg(cmd);
+
+	/*
+	 * Go ahead and create registrations from tid_dest_list for the
+	 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+	 * and dest_se_deve.
+	 *
+	 * The SA Reservation Key from the PROUT is set for the
+	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
+	 * means that the TransportID Initiator port will be
+	 * registered on all of the target ports in the SCSI target device;
+	 * ALL_TG_PT=0 means the registration will only be for the
+	 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
+	 * was received.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		core_pr_dump_initiator_port(dest_pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+		__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
+					dest_pr_reg, 0, 0);
+
+		pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
+			" registered Transport ID for Node: %s%s Mapped LUN:"
+			" %llu\n", dest_tpg->se_tpg_tfo->fabric_name,
+			dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
+			dest_se_deve->mapped_lun : 0);
+
+		if (dest_pr_reg == local_pr_reg)
+			continue;
+
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+
+	return 0;
+out_unmap:
+	transport_kunmap_data_sg(cmd);
+out:
+	/*
+	 * For the failure case, release everything from tid_dest_list
+	 * including *dest_pr_reg and the configfs dependencies..
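The spc4r17 6.14.3 language quoted above is what forces the two-loop shape: allocate every registration first, so a failure can unwind with no partial effects, then publish in a second pass that cannot fail. The same pattern in miniature, with hypothetical types:

#include <stdlib.h>

struct pending { struct pending *next; };

/* Phase 1 allocates all-or-nothing; phase 2 (handing *out to the
 * caller) cannot fail. */
static int alloc_all_or_nothing(struct pending **out, int n)
{
	struct pending *head = NULL, *p;
	int i;

	for (i = 0; i < n; i++) {
		p = calloc(1, sizeof(*p));
		if (!p) {
			while ((p = head)) {	/* unwind completely */
				head = p->next;
				free(p);
			}
			return -1;
		}
		p->next = head;
		head = p;
	}
	*out = head;
	return 0;
}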
+ */ + list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { + bool is_local = false; + + dest_tpg = tidh->dest_tpg; + dest_node_acl = tidh->dest_node_acl; + dest_se_deve = tidh->dest_se_deve; + dest_pr_reg = tidh->dest_pr_reg; + + if (dest_pr_reg == local_pr_reg) + is_local = true; + + list_del(&tidh->dest_list); + kfree(tidh); + /* + * Release any extra ALL_TG_PT=1 registrations for + * the SPEC_I_PT=1 case. + */ + list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, + &dest_pr_reg->pr_reg_atp_list, + pr_reg_atp_mem_list) { + list_del(&pr_reg_tmp->pr_reg_atp_mem_list); + core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); + kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); + } + + kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); + core_scsi3_lunacl_undepend_item(dest_se_deve); + + if (is_local) + continue; + + core_scsi3_nodeacl_undepend_item(dest_node_acl); + core_scsi3_tpg_undepend_item(dest_tpg); + } + return ret; +} + +static int core_scsi3_update_aptpl_buf( + struct se_device *dev, + unsigned char *buf, + u32 pr_aptpl_buf_len) +{ + struct se_portal_group *tpg; + struct t10_pr_registration *pr_reg; + unsigned char tmp[512], isid_buf[32]; + ssize_t len = 0; + int reg_count = 0; + int ret = 0; + + spin_lock(&dev->dev_reservation_lock); + spin_lock(&dev->t10_pr.registration_lock); + /* + * Walk the registration list.. + */ + list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, + pr_reg_list) { + + tmp[0] = '\0'; + isid_buf[0] = '\0'; + tpg = pr_reg->pr_reg_nacl->se_tpg; + /* + * Write out any ISID value to APTPL metadata that was included + * in the original registration. + */ + if (pr_reg->isid_present_at_reg) + snprintf(isid_buf, 32, "initiator_sid=%s\n", + pr_reg->pr_reg_isid); + /* + * Include special metadata if the pr_reg matches the + * reservation holder. + */ + if (dev->dev_pr_res_holder == pr_reg) { + snprintf(tmp, 512, "PR_REG_START: %d" + "\ninitiator_fabric=%s\n" + "initiator_node=%s\n%s" + "sa_res_key=%llu\n" + "res_holder=1\nres_type=%02x\n" + "res_scope=%02x\nres_all_tg_pt=%d\n" + "mapped_lun=%llu\n", reg_count, + tpg->se_tpg_tfo->fabric_name, + pr_reg->pr_reg_nacl->initiatorname, isid_buf, + pr_reg->pr_res_key, pr_reg->pr_res_type, + pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, + pr_reg->pr_res_mapped_lun); + } else { + snprintf(tmp, 512, "PR_REG_START: %d\n" + "initiator_fabric=%s\ninitiator_node=%s\n%s" + "sa_res_key=%llu\nres_holder=0\n" + "res_all_tg_pt=%d\nmapped_lun=%llu\n", + reg_count, tpg->se_tpg_tfo->fabric_name, + pr_reg->pr_reg_nacl->initiatorname, isid_buf, + pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, + pr_reg->pr_res_mapped_lun); + } + + if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { + pr_err("Unable to update renaming APTPL metadata," + " reallocating larger buffer\n"); + ret = -EMSGSIZE; + goto out; + } + len += sprintf(buf+len, "%s", tmp); + + /* + * Include information about the associated SCSI target port. 
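Joining the initiator-side fields above with the target-side fields emitted in the next hunk, one restored-registration record in the APTPL metadata file looks roughly like this (every value invented):

#include <stdio.h>

int main(void)
{
	/* Shape of a single non-holder record, per the snprintf formats. */
	printf("PR_REG_START: 0\n"
	       "initiator_fabric=iSCSI\n"
	       "initiator_node=iqn.1994-05.com.example:client\n"
	       "sa_res_key=12345\n"
	       "res_holder=0\n"
	       "res_all_tg_pt=0\n"
	       "mapped_lun=0\n"
	       "target_fabric=iSCSI\n"
	       "target_node=iqn.2003-01.org.example:target\n"
	       "tpgt=1\n"
	       "port_rtpi=1\n"
	       "target_lun=0\n"
	       "PR_REG_END: 0\n");
	return 0;
}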
+ */ + snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" + "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:" + " %d\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), + pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun, + reg_count); + + if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { + pr_err("Unable to update renaming APTPL metadata," + " reallocating larger buffer\n"); + ret = -EMSGSIZE; + goto out; + } + len += sprintf(buf+len, "%s", tmp); + reg_count++; + } + + if (!reg_count) + len += sprintf(buf+len, "No Registrations or Reservations"); + +out: + spin_unlock(&dev->t10_pr.registration_lock); + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} + +static int __core_scsi3_write_aptpl_to_file( + struct se_device *dev, + unsigned char *buf) +{ + struct t10_wwn *wwn = &dev->t10_wwn; + struct file *file; + int flags = O_RDWR | O_CREAT | O_TRUNC; + char *path; + u32 pr_aptpl_buf_len; + int ret; + loff_t pos = 0; + + path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root, + &wwn->unit_serial[0]); + if (!path) + return -ENOMEM; + + file = filp_open(path, flags, 0600); + if (IS_ERR(file)) { + pr_err("filp_open(%s) for APTPL metadata" + " failed\n", path); + kfree(path); + return PTR_ERR(file); + } + + pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */ + + ret = kernel_write(file, buf, pr_aptpl_buf_len, &pos); + + if (ret < 0) + pr_debug("Error writing APTPL metadata file: %s\n", path); + fput(file); + kfree(path); + + return (ret < 0) ? -EIO : 0; +} + +/* + * Clear the APTPL metadata if APTPL has been disabled, otherwise + * write out the updated metadata to struct file for this SCSI device. + */ +static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl) +{ + unsigned char *buf; + int rc, len = PR_APTPL_BUF_LEN; + + if (!aptpl) { + char *null_buf = "No Registrations or Reservations\n"; + + rc = __core_scsi3_write_aptpl_to_file(dev, null_buf); + dev->t10_pr.pr_aptpl_active = 0; + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated\n"); + + if (rc) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + return 0; + } +retry: + buf = vzalloc(len); + if (!buf) + return TCM_OUT_OF_RESOURCES; + + rc = core_scsi3_update_aptpl_buf(dev, buf, len); + if (rc < 0) { + vfree(buf); + len *= 2; + goto retry; + } + + rc = __core_scsi3_write_aptpl_to_file(dev, buf); + if (rc != 0) { + pr_err("SPC-3 PR: Could not update APTPL\n"); + vfree(buf); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + dev->t10_pr.pr_aptpl_active = 1; + vfree(buf); + pr_debug("SPC-3 PR: Set APTPL Bit Activated\n"); + return 0; +} + +static sense_reason_t +core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, + bool aptpl, bool all_tg_pt, bool spec_i_pt, enum register_type register_type) +{ + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_dev; + struct se_lun *se_lun = cmd->se_lun; + struct se_portal_group *se_tpg; + struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + unsigned char isid_buf[PR_REG_ISID_LEN] = { }; + unsigned char *isid_ptr = NULL; + sense_reason_t ret = TCM_NO_SENSE; + int pr_holder = 0, type; + + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + se_tpg = se_sess->se_tpg; + + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], + 
PR_REG_ISID_LEN);
+		isid_ptr = &isid_buf[0];
+	}
+	/*
+	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!pr_reg) {
+		if (res_key) {
+			pr_warn("SPC-3 PR: Reservation Key non-zero"
+				" for SA REGISTER, returning CONFLICT\n");
+			return TCM_RESERVATION_CONFLICT;
+		}
+		/*
+		 * Do nothing but return GOOD status.
+		 */
+		if (!sa_res_key)
+			return 0;
+
+		if (!spec_i_pt) {
+			/*
+			 * Perform the Service Action REGISTER on the Initiator
+			 * Port Endpoint that the PRO was received from on the
+			 * Logical Unit of the SCSI device server.
+			 */
+			if (core_scsi3_alloc_registration(cmd->se_dev,
+					se_sess->se_node_acl, cmd->se_lun,
+					NULL, cmd->orig_fe_lun, isid_ptr,
+					sa_res_key, all_tg_pt, aptpl,
+					register_type, 0)) {
+				pr_err("Unable to allocate"
+					" struct t10_pr_registration\n");
+				return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+			}
+		} else {
+			/*
+			 * Register both the Initiator port that received
+			 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
+			 * TransportID from Parameter list and loop through
+			 * fabric dependent parameter list while calling
+			 * logic from core_scsi3_alloc_registration() for
+			 * each TransportID provided SCSI Initiator Port/Device
+			 */
+			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+					isid_ptr, sa_res_key, all_tg_pt, aptpl);
+			if (ret != 0)
+				return ret;
+		}
+		return core_scsi3_update_and_write_aptpl(dev, aptpl);
+	}
+
+	/* ok, existing registration */
+
+	if ((register_type == REGISTER) && (res_key != pr_reg->pr_res_key)) {
+		pr_err("SPC-3 PR REGISTER: Received"
+			" res_key: 0x%016Lx does not match"
+			" existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key,
+			pr_reg->pr_res_key);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out;
+	}
+
+	if (spec_i_pt) {
+		pr_err("SPC-3 PR REGISTER: SPEC_I_PT"
+			" set on a registered nexus\n");
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	/*
+	 * An existing ALL_TG_PT=1 registration being released
+	 * must also set ALL_TG_PT=1 in the incoming PROUT.
+	 */
+	if (pr_reg->pr_reg_all_tg_pt && !all_tg_pt) {
+		pr_err("SPC-3 PR REGISTER: ALL_TG_PT=1"
+			" registration exists, but ALL_TG_PT=1 bit not"
+			" present in received PROUT\n");
+		ret = TCM_INVALID_CDB_FIELD;
+		goto out;
+	}
+
+	/*
+	 * sa_res_key=1 Change Reservation Key for registered I_T Nexus.
+	 */
+	if (sa_res_key) {
+		/*
+		 * Increment PRgeneration counter for struct se_device
+		 * upon a successful REGISTER, see spc4r17 section 6.3.2
+		 * READ_KEYS service action.
+		 */
+		pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
+		pr_reg->pr_res_key = sa_res_key;
+		pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+			" Key for %s to: 0x%016Lx PRgeneration:"
+			" 0x%08x\n", cmd->se_tfo->fabric_name,
+			(register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
+			pr_reg->pr_reg_nacl->initiatorname,
+			pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+	} else {
+		/*
+		 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
+		 */
+		type = pr_reg->pr_res_type;
+		pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
+							      pr_reg);
+		if (pr_holder < 0) {
+			ret = TCM_RESERVATION_CONFLICT;
+			goto out;
+		}
+
+		spin_lock(&pr_tmpl->registration_lock);
+		/*
+		 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+		 * and matching pr_res_key.
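For the no-existing-registration branch above, the spc4r17 Table 47 behaviors reduce to a small decision function (the return values are illustrative stand-ins, not the kernel's sense codes):

#include <stdint.h>

enum reg_outcome { OUT_GOOD, OUT_CONFLICT, OUT_REGISTER, OUT_SPEC_I_PT };

static enum reg_outcome register_no_existing(uint64_t res_key,
					     uint64_t sa_res_key,
					     int spec_i_pt)
{
	if (res_key)
		return OUT_CONFLICT;	/* key given, nothing registered */
	if (!sa_res_key)
		return OUT_GOOD;	/* no-op, succeed */
	return spec_i_pt ? OUT_SPEC_I_PT : OUT_REGISTER;
}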
+ */ + if (pr_reg->pr_reg_all_tg_pt) { + list_for_each_entry_safe(pr_reg_p, pr_reg_tmp, + &pr_tmpl->registration_list, + pr_reg_list) { + + if (!pr_reg_p->pr_reg_all_tg_pt) + continue; + if (pr_reg_p->pr_res_key != res_key) + continue; + if (pr_reg == pr_reg_p) + continue; + if (strcmp(pr_reg->pr_reg_nacl->initiatorname, + pr_reg_p->pr_reg_nacl->initiatorname)) + continue; + + __core_scsi3_free_registration(dev, + pr_reg_p, NULL, 0); + } + } + + /* + * Release the calling I_T Nexus registration now.. + */ + __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); + pr_reg = NULL; + + /* + * From spc4r17, section 5.7.11.3 Unregistering + * + * If the persistent reservation is a registrants only + * type, the device server shall establish a unit + * attention condition for the initiator port associated + * with every registered I_T nexus except for the I_T + * nexus on which the PERSISTENT RESERVE OUT command was + * received, with the additional sense code set to + * RESERVATIONS RELEASED. + */ + if (pr_holder && + (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || + type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { + list_for_each_entry(pr_reg_p, + &pr_tmpl->registration_list, + pr_reg_list) { + + target_ua_allocate_lun( + pr_reg_p->pr_reg_nacl, + pr_reg_p->pr_res_mapped_lun, + 0x2A, + ASCQ_2AH_RESERVATIONS_RELEASED); + } + } + + spin_unlock(&pr_tmpl->registration_lock); + } + + ret = core_scsi3_update_and_write_aptpl(dev, aptpl); + +out: + if (pr_reg) + core_scsi3_put_pr_reg(pr_reg); + return ret; +} + +unsigned char *core_scsi3_pr_dump_type(int type) +{ + switch (type) { + case PR_TYPE_WRITE_EXCLUSIVE: + return "Write Exclusive Access"; + case PR_TYPE_EXCLUSIVE_ACCESS: + return "Exclusive Access"; + case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: + return "Write Exclusive Access, Registrants Only"; + case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: + return "Exclusive Access, Registrants Only"; + case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: + return "Write Exclusive Access, All Registrants"; + case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: + return "Exclusive Access, All Registrants"; + default: + break; + } + + return "Unknown SPC-3 PR Type"; +} + +static sense_reason_t +core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *se_sess = cmd->se_sess; + struct se_lun *se_lun = cmd->se_lun; + struct t10_pr_registration *pr_reg, *pr_res_holder; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + char i_buf[PR_REG_ISID_ID_LEN] = { }; + sense_reason_t ret; + + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, + se_sess); + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for RESERVE\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * From spc4r17 Section 5.7.9: Reserving: + * + * An application client creates a persistent reservation by issuing + * a PERSISTENT RESERVE OUT command with RESERVE service action through + * a registered I_T nexus with the following parameters: + * a) RESERVATION KEY set to the value of the reservation key that is + * registered with the logical unit for the I_T nexus; and + */ + if (res_key != pr_reg->pr_res_key) { + pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx" + " does not match existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, 
pr_reg->pr_res_key);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * From above:
+	 *  b) TYPE field and SCOPE field set to the persistent reservation
+	 *     being created.
+	 *
+	 * Only one persistent reservation is allowed at a time per logical unit
+	 * and that persistent reservation has a scope of LU_SCOPE.
+	 */
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * See if we have an existing PR reservation holder pointer at
+	 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+	 * *pr_res_holder.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command from an I_T nexus other than a persistent reservation
+		 * holder (see 5.7.10) that attempts to create a persistent
+		 * reservation when a persistent reservation already exists for
+		 * the logical unit, then the command shall be completed with
+		 * RESERVATION CONFLICT status.
+		 */
+		if (!is_reservation_holder(pr_res_holder, pr_reg)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			pr_err("SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s while reservation already held by"
+				" [%s]: %s, returning RESERVATION_CONFLICT\n",
+				cmd->se_tfo->fabric_name,
+				se_sess->se_node_acl->initiatorname,
+				pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = TCM_RESERVATION_CONFLICT;
+			goto out_put_pr_reg;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If a persistent reservation holder attempts to modify the
+		 * type or scope of an existing persistent reservation, the
+		 * command shall be completed with RESERVATION CONFLICT status.
+		 */
+		if ((pr_res_holder->pr_res_type != type) ||
+		    (pr_res_holder->pr_res_scope != scope)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			pr_err("SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s trying to change TYPE and/or SCOPE,"
+				" while reservation already held by [%s]: %s,"
+				" returning RESERVATION_CONFLICT\n",
+				cmd->se_tfo->fabric_name,
+				se_sess->se_node_acl->initiatorname,
+				pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = TCM_RESERVATION_CONFLICT;
+			goto out_put_pr_reg;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command with RESERVE service action where the TYPE field and
+		 * the SCOPE field contain the same values as the existing type
+		 * and scope from a persistent reservation holder, it shall not
+		 * make any change to the existing persistent reservation and
+		 * shall complete the command with GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = 0;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
+	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
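The RESERVE sequence above, conflicting holder, holder attempting to change TYPE or SCOPE, and the idempotent re-reserve, condenses to this sketch (types hypothetical):

#include <stdbool.h>

struct resv_model { int type, scope; bool held; };

/* Returns 0 for GOOD, -1 for RESERVATION CONFLICT. */
static int pro_reserve(struct resv_model *r, bool i_am_holder,
		       int type, int scope)
{
	if (r->held) {
		if (!i_am_holder)
			return -1;		/* someone else holds it */
		if (r->type != type || r->scope != scope)
			return -1;		/* holder may not morph it */
		return 0;			/* same TYPE/SCOPE: no change */
	}
	r->type = type;
	r->scope = scope;
	r->held = true;				/* become the holder */
	return 0;
}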
+ */
+	pr_reg->pr_res_scope = scope;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_holder = 1;
+	dev->dev_pr_res_holder = pr_reg;
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+
+	pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		cmd->se_tfo->fabric_name, core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+			cmd->se_tfo->fabric_name,
+			se_sess->se_node_acl->initiatorname,
+			i_buf);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if (pr_tmpl->pr_aptpl_active)
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+	ret = 0;
+out_put_pr_reg:
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static sense_reason_t
+core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
+		u64 res_key)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return core_scsi3_pro_reserve(cmd, type, scope, res_key);
+	default:
+		pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
+			" 0x%02x\n", type);
+		return TCM_INVALID_CDB_FIELD;
+	}
+}
+
+static void __core_scsi3_complete_pro_release(
+	struct se_device *dev,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int explicit,
+	int unreg)
+{
+	const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN] = { };
+	int pr_res_type = 0, pr_res_scope = 0;
+
+	lockdep_assert_held(&dev->dev_reservation_lock);
+
+	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+	/*
+	 * Go ahead and release the current PR reservation holder.
+	 * If an All Registrants reservation is currently active and
+	 * an unregister operation is requested, replace the current
+	 * dev_pr_res_holder with another active registration.
+	 */
+	if (dev->dev_pr_res_holder) {
+		pr_res_type = dev->dev_pr_res_holder->pr_res_type;
+		pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
+		dev->dev_pr_res_holder->pr_res_type = 0;
+		dev->dev_pr_res_holder->pr_res_scope = 0;
+		dev->dev_pr_res_holder->pr_res_holder = 0;
+		dev->dev_pr_res_holder = NULL;
+	}
+	if (!unreg)
+		goto out;
+
+	spin_lock(&dev->t10_pr.registration_lock);
+	list_del_init(&pr_reg->pr_reg_list);
+	/*
+	 * If the I_T nexus is a reservation holder, the persistent reservation
+	 * is of an all registrants type, and the I_T nexus is the last remaining
+	 * registered I_T nexus, then the device server shall also release the
+	 * persistent reservation.
+	 */
+	if (!list_empty(&dev->t10_pr.registration_list) &&
+	    ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	     (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
+		dev->dev_pr_res_holder =
+			list_entry(dev->t10_pr.registration_list.next,
+				   struct t10_pr_registration, pr_reg_list);
+		dev->dev_pr_res_holder->pr_res_type = pr_res_type;
+		dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
+		dev->dev_pr_res_holder->pr_res_holder = 1;
+	}
+	spin_unlock(&dev->t10_pr.registration_lock);
+out:
+	if (!dev->dev_pr_res_holder) {
+		pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+			" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+			tfo->fabric_name, (explicit) ? "explicit" :
+			"implicit", core_scsi3_pr_dump_type(pr_res_type),
+			(pr_reg->pr_reg_all_tg_pt) ?
1 : 0); + } + pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", + tfo->fabric_name, se_nacl->initiatorname, + i_buf); + /* + * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE + */ + pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0; +} + +static sense_reason_t +core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, + u64 res_key) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *se_sess = cmd->se_sess; + struct se_lun *se_lun = cmd->se_lun; + struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + sense_reason_t ret = 0; + + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for RELEASE\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * From spc4r17 Section 5.7.11.2 Releasing: + * + * If there is no persistent reservation or in response to a persistent + * reservation release request from a registered I_T nexus that is not a + * persistent reservation holder (see 5.7.10), the device server shall + * do the following: + * + * a) Not release the persistent reservation, if any; + * b) Not remove any registrations; and + * c) Complete the command with GOOD status. + */ + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (!pr_res_holder) { + /* + * No persistent reservation, return GOOD status. + */ + spin_unlock(&dev->dev_reservation_lock); + goto out_put_pr_reg; + } + + if (!is_reservation_holder(pr_res_holder, pr_reg)) { + /* + * Release request from a registered I_T nexus that is not a + * persistent reservation holder. return GOOD status. + */ + spin_unlock(&dev->dev_reservation_lock); + goto out_put_pr_reg; + } + + /* + * From spc4r17 Section 5.7.11.2 Releasing: + * + * Only the persistent reservation holder (see 5.7.10) is allowed to + * release a persistent reservation. + * + * An application client releases the persistent reservation by issuing + * a PERSISTENT RESERVE OUT command with RELEASE service action through + * an I_T nexus that is a persistent reservation holder with the + * following parameters: + * + * a) RESERVATION KEY field set to the value of the reservation key + * that is registered with the logical unit for the I_T nexus; + */ + if (res_key != pr_reg->pr_res_key) { + pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx" + " does not match existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, pr_reg->pr_res_key); + spin_unlock(&dev->dev_reservation_lock); + ret = TCM_RESERVATION_CONFLICT; + goto out_put_pr_reg; + } + /* + * From spc4r17 Section 5.7.11.2 Releasing and above: + * + * b) TYPE field and SCOPE field set to match the persistent + * reservation being released. 
+ */
+	if ((pr_res_holder->pr_res_type != type) ||
+	    (pr_res_holder->pr_res_scope != scope)) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		pr_err("SPC-3 PR RELEASE: Attempted to release"
+			" reservation from [%s]: %s with different TYPE "
+			"and/or SCOPE while reservation already held by"
+			" [%s]: %s, returning RESERVATION_CONFLICT\n",
+			cmd->se_tfo->fabric_name,
+			se_sess->se_node_acl->initiatorname,
+			pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
+			pr_res_holder->pr_reg_nacl->initiatorname);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = TCM_RESERVATION_CONFLICT;
+		goto out_put_pr_reg;
+	}
+	/*
+	 * In response to a persistent reservation release request from the
+	 * persistent reservation holder the device server shall perform a
+	 * release by doing the following as an uninterrupted series of actions:
+	 * a) Release the persistent reservation;
+	 * b) Not remove any registration(s);
+	 * c) If the released persistent reservation is a registrants only type
+	 *    or all registrants type persistent reservation,
+	 *    the device server shall establish a unit attention condition for
+	 *    the initiator port associated with every registered I_T nexus
+	 *    other than the I_T nexus on which the PERSISTENT RESERVE OUT
+	 *    command with RELEASE service action was received, with the
+	 *    additional sense code set to RESERVATIONS RELEASED; and
+	 * d) If the persistent reservation is of any other type, the device
+	 *    server shall not establish a unit attention condition.
+	 */
+	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+					  pr_reg, 1, 0);
+
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+	    (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		/*
+		 * If no UNIT ATTENTION conditions will be established for
+		 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
+		 * go ahead and check for APTPL=1 update+write below
+		 */
+		goto write_aptpl;
+	}
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+			pr_reg_list) {
+		/*
+		 * Do not establish a UNIT ATTENTION condition
+		 * for the calling I_T Nexus
+		 */
+		if (pr_reg_p == pr_reg)
+			continue;
+
+		target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
+			pr_reg_p->pr_res_mapped_lun,
+			0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+	if (pr_tmpl->pr_aptpl_active)
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+out_put_pr_reg:
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static sense_reason_t
+core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = cmd->se_sess;
+	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	u64 pr_res_mapped_lun = 0;
+	int calling_it_nexus = 0;
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
+			se_sess->se_node_acl, se_sess);
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for CLEAR\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * From spc4r17 section 5.7.11.6, Clearing:
+	 *
+	 * Any application client may release the persistent reservation and
+	 * remove all registrations from a device server by issuing a
+	 * PERSISTENT RESERVE OUT command with 
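Which I_T nexuses receive the RESERVATIONS RELEASED unit attention is purely a function of the reservation type and caller identity, per the spc4r17 list above. A condensed predicate using the SPC type codes:

#include <stdbool.h>

/* Registrants-only and all-registrants types (5h/6h/7h/8h) notify
 * every registered nexus except the releasing one. */
static bool release_sends_ua(int type, bool is_caller)
{
	if (is_caller)
		return false;
	switch (type) {
	case 0x5: case 0x6:	/* WE/EA, REGISTRANTS ONLY */
	case 0x7: case 0x8:	/* WE/EA, ALL REGISTRANTS */
		return true;
	default:
		return false;
	}
}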
CLEAR service action through a + * registered I_T nexus with the following parameter: + * + * a) RESERVATION KEY field set to the value of the reservation key + * that is registered with the logical unit for the I_T nexus. + */ + if (res_key != pr_reg_n->pr_res_key) { + pr_err("SPC-3 PR REGISTER: Received" + " res_key: 0x%016Lx does not match" + " existing SA REGISTER res_key:" + " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); + core_scsi3_put_pr_reg(pr_reg_n); + return TCM_RESERVATION_CONFLICT; + } + /* + * a) Release the persistent reservation, if any; + */ + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (pr_res_holder) { + struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; + __core_scsi3_complete_pro_release(dev, pr_res_nacl, + pr_res_holder, 0, 0); + } + spin_unlock(&dev->dev_reservation_lock); + /* + * b) Remove all registration(s) (see spc4r17 5.7.7); + */ + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + + calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; + pr_reg_nacl = pr_reg->pr_reg_nacl; + pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; + __core_scsi3_free_registration(dev, pr_reg, NULL, + calling_it_nexus); + /* + * e) Establish a unit attention condition for the initiator + * port associated with every registered I_T nexus other + * than the I_T nexus on which the PERSISTENT RESERVE OUT + * command with CLEAR service action was received, with the + * additional sense code set to RESERVATIONS PREEMPTED. + */ + if (!calling_it_nexus) + target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, + 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); + } + spin_unlock(&pr_tmpl->registration_lock); + + pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", + cmd->se_tfo->fabric_name); + + core_scsi3_update_and_write_aptpl(cmd->se_dev, false); + + core_scsi3_pr_generation(dev); + return 0; +} + +static void __core_scsi3_complete_pro_preempt( + struct se_device *dev, + struct t10_pr_registration *pr_reg, + struct list_head *preempt_and_abort_list, + int type, + int scope, + enum preempt_type preempt_type) +{ + struct se_node_acl *nacl = pr_reg->pr_reg_nacl; + const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; + char i_buf[PR_REG_ISID_ID_LEN] = { }; + + lockdep_assert_held(&dev->dev_reservation_lock); + + core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); + /* + * Do an implicit RELEASE of the existing reservation. + */ + if (dev->dev_pr_res_holder) + __core_scsi3_complete_pro_release(dev, nacl, + dev->dev_pr_res_holder, 0, 0); + + dev->dev_pr_res_holder = pr_reg; + pr_reg->pr_res_holder = 1; + pr_reg->pr_res_type = type; + pr_reg->pr_res_scope = scope; + + pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" + " reservation holder TYPE: %s ALL_TG_PT: %d\n", + tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", + core_scsi3_pr_dump_type(type), + (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", + tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", + nacl->initiatorname, i_buf); + /* + * For PREEMPT_AND_ABORT, add the preempting reservation's + * struct t10_pr_registration to the list that will be compared + * against received CDBs.. 
+ */ + if (preempt_and_abort_list) + list_add_tail(&pr_reg->pr_reg_abort_list, + preempt_and_abort_list); +} + +static void core_scsi3_release_preempt_and_abort( + struct list_head *preempt_and_abort_list, + struct t10_pr_registration *pr_reg_holder) +{ + struct t10_pr_registration *pr_reg, *pr_reg_tmp; + + list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, + pr_reg_abort_list) { + + list_del(&pr_reg->pr_reg_abort_list); + if (pr_reg_holder == pr_reg) + continue; + if (pr_reg->pr_res_holder) { + pr_warn("pr_reg->pr_res_holder still set\n"); + continue; + } + + pr_reg->pr_reg_deve = NULL; + pr_reg->pr_reg_nacl = NULL; + kmem_cache_free(t10_pr_reg_cache, pr_reg); + } +} + +static sense_reason_t +core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, + u64 sa_res_key, enum preempt_type preempt_type) +{ + struct se_device *dev = cmd->se_dev; + struct se_node_acl *pr_reg_nacl; + struct se_session *se_sess = cmd->se_sess; + LIST_HEAD(preempt_and_abort_list); + struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + u64 pr_res_mapped_lun = 0; + int all_reg = 0, calling_it_nexus = 0; + bool sa_res_key_unmatched = sa_res_key != 0; + int prh_type = 0, prh_scope = 0; + + if (!se_sess) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, + se_sess); + if (!pr_reg_n) { + pr_err("SPC-3 PR: Unable to locate" + " PR_REGISTERED *pr_reg for PREEMPT%s\n", + (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : ""); + return TCM_RESERVATION_CONFLICT; + } + if (pr_reg_n->pr_res_key != res_key) { + core_scsi3_put_pr_reg(pr_reg_n); + return TCM_RESERVATION_CONFLICT; + } + if (scope != PR_SCOPE_LU_SCOPE) { + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + core_scsi3_put_pr_reg(pr_reg_n); + return TCM_INVALID_PARAMETER_LIST; + } + + spin_lock(&dev->dev_reservation_lock); + pr_res_holder = dev->dev_pr_res_holder; + if (pr_res_holder && + ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || + (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) + all_reg = 1; + + if (!all_reg && !sa_res_key) { + spin_unlock(&dev->dev_reservation_lock); + core_scsi3_put_pr_reg(pr_reg_n); + return TCM_INVALID_PARAMETER_LIST; + } + /* + * From spc4r17, section 5.7.11.4.4 Removing Registrations: + * + * If the SERVICE ACTION RESERVATION KEY field does not identify a + * persistent reservation holder or there is no persistent reservation + * holder (i.e., there is no persistent reservation), then the device + * server shall perform a preempt by doing the following in an + * uninterrupted series of actions. (See below..) + */ + if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) { + /* + * No existing or SA Reservation Key matching reservations.. + * + * PROUT SA PREEMPT with All Registrant type reservations are + * allowed to be processed without a matching SA Reservation Key + */ + spin_lock(&pr_tmpl->registration_lock); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, + &pr_tmpl->registration_list, pr_reg_list) { + /* + * Removing of registrations in non all registrants + * type reservations without a matching SA reservation + * key. 
+ *
+ * a) Remove the registrations for all I_T nexuses
+ * specified by the SERVICE ACTION RESERVATION KEY
+ * field;
+ * b) Ignore the contents of the SCOPE and TYPE fields;
+ * c) Process tasks as defined in 5.7.1; and
+ * d) Establish a unit attention condition for the
+ * initiator port associated with every I_T nexus
+ * that lost its registration other than the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command
+ * was received, with the additional sense code set
+ * to REGISTRATIONS PREEMPTED.
+ */
+ if (!all_reg) {
+ if (pr_reg->pr_res_key != sa_res_key)
+ continue;
+ sa_res_key_unmatched = false;
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
+ NULL, calling_it_nexus);
+ } else {
+ /*
+ * Case for any existing all registrants type
+ * reservation, follow logic in spc4r17 section
+ * 5.7.11.4 Preempting, Table 52 and Figure 7.
+ *
+ * For a ZERO SA Reservation key, release
+ * all other registrations and do an implicit
+ * release of active persistent reservation.
+ *
+ * For a non-ZERO SA Reservation key, only
+ * release the matching reservation key from
+ * registrations.
+ */
+ if ((sa_res_key) &&
+ (pr_reg->pr_res_key != sa_res_key))
+ continue;
+ sa_res_key_unmatched = false;
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
+ NULL, 0);
+ }
+ if (!calling_it_nexus)
+ target_ua_allocate_lun(pr_reg_nacl,
+ pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+ * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+ * RESERVATION KEY field to a value that does not match any
+ * registered reservation key, then the device server shall
+ * complete the command with RESERVATION CONFLICT status.
+ */
+ if (sa_res_key_unmatched) {
+ spin_unlock(&dev->dev_reservation_lock);
+ core_scsi3_put_pr_reg(pr_reg_n);
+ return TCM_RESERVATION_CONFLICT;
+ }
+ /*
+ * For an existing all registrants type reservation
+ * with a zero SA reservation key, preempt the existing
+ * reservation with the new PR type and scope.
+ */
+ if (pr_res_holder && all_reg && !(sa_res_key)) {
+ __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+ type, scope, preempt_type);
+ }
+
+ spin_unlock(&dev->dev_reservation_lock);
+
+ /*
+ * SPC-4 5.12.11.2.6 Preempting and aborting
+ * The actions described in this subclause shall be performed
+ * for all I_T nexuses that are registered with the non-zero
+ * SERVICE ACTION RESERVATION KEY value, without regard for
+ * whether the preempted I_T nexuses hold the persistent
+ * reservation. If the SERVICE ACTION RESERVATION KEY field is
+ * set to zero and an all registrants persistent reservation is
+ * present, the device server shall abort all commands for all
+ * registered I_T nexuses.
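+ *
+ * (Implemented just below by passing preempt_and_abort_list to
+ * core_tmr_lun_reset() when preempt_type == PREEMPT_AND_ABORT.)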
+ */
+ if (preempt_type == PREEMPT_AND_ABORT) {
+ core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list,
+ cmd);
+ core_scsi3_release_preempt_and_abort(
+ &preempt_and_abort_list, pr_reg_n);
+ }
+
+ if (pr_tmpl->pr_aptpl_active)
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+ core_scsi3_put_pr_reg(pr_reg_n);
+ core_scsi3_pr_generation(cmd->se_dev);
+ return 0;
+ }
+ /*
+ * The PREEMPTing SA reservation key matches that of the
+ * existing persistent reservation. First, we check whether
+ * we are preempting our own reservation.
+ * From spc4r17, section 5.7.11.4.3 Preempting
+ * persistent reservations and registration handling
+ *
+ * If an all registrants persistent reservation is not
+ * present, it is not an error for the persistent
+ * reservation holder to preempt itself (i.e., a
+ * PERSISTENT RESERVE OUT with a PREEMPT service action
+ * or a PREEMPT AND ABORT service action with the
+ * SERVICE ACTION RESERVATION KEY value equal to the
+ * persistent reservation holder's reservation key that
+ * is received from the persistent reservation holder).
+ * In that case, the device server shall establish the
+ * new persistent reservation and maintain the
+ * registration.
+ */
+ prh_type = pr_res_holder->pr_res_type;
+ prh_scope = pr_res_holder->pr_res_scope;
+ /*
+ * If the SERVICE ACTION RESERVATION KEY field identifies a
+ * persistent reservation holder (see 5.7.10), the device
+ * server shall perform a preempt by doing the following as
+ * an uninterrupted series of actions:
+ *
+ * a) Release the persistent reservation for the holder
+ * identified by the SERVICE ACTION RESERVATION KEY field;
+ */
+ if (pr_reg_n != pr_res_holder)
+ __core_scsi3_complete_pro_release(dev,
+ pr_res_holder->pr_reg_nacl,
+ dev->dev_pr_res_holder, 0, 0);
+ /*
+ * b) Remove the registrations for all I_T nexuses identified
+ * by the SERVICE ACTION RESERVATION KEY field, except the
+ * I_T nexus that is being used for the PERSISTENT RESERVE
+ * OUT command. If an all registrants persistent reservation
+ * is present and the SERVICE ACTION RESERVATION KEY field
+ * is set to zero, then all registrations shall be removed
+ * except for that of the I_T nexus that is being used for
+ * the PERSISTENT RESERVE OUT command;
+ */
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ if (sa_res_key && pr_reg->pr_res_key != sa_res_key)
+ continue;
+
+ pr_reg_nacl = pr_reg->pr_reg_nacl;
+ pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+ __core_scsi3_free_registration(dev, pr_reg,
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+ calling_it_nexus);
+ /*
+ * e) Establish a unit attention condition for the initiator
+ * port associated with every I_T nexus that lost its
+ * persistent reservation and/or registration, with the
+ * additional sense code set to REGISTRATIONS PREEMPTED;
+ */
+ target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * c) Establish a persistent reservation for the preempting
+ * I_T nexus using the contents of the SCOPE and TYPE fields;
+ */
+ __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+ (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
+ type, scope, preempt_type);
+ /*
+ * d) Process tasks as defined in 5.7.1;
+ * e) See above..
+ * f) If the type or scope has changed, then for every I_T nexus
+ * whose reservation key was not removed, except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, the device server shall establish a unit
+ * attention condition for the initiator port associated with
+ * that I_T nexus, with the additional sense code set to
+ * RESERVATIONS RELEASED. If the type or scope have not
+ * changed, then no unit attention condition(s) shall be
+ * established for this reason.
+ */
+ if ((prh_type != type) || (prh_scope != scope)) {
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+ if (calling_it_nexus)
+ continue;
+
+ target_ua_allocate_lun(pr_reg->pr_reg_nacl,
+ pr_reg->pr_res_mapped_lun, 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * Call LUN_RESET logic upon the list of struct t10_pr_registration;
+ * all received CDBs for the matching existing reservation and
+ * registrations undergo ABORT_TASK logic.
+ *
+ * From there, core_scsi3_release_preempt_and_abort() will
+ * release every registration in the list (all of which have already
+ * been removed from the primary pr_reg list), except the
+ * new persistent reservation holder, the calling Initiator Port.
+ */
+ if (preempt_type == PREEMPT_AND_ABORT) {
+ core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+ core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+ pr_reg_n);
+ }
+
+ if (pr_tmpl->pr_aptpl_active)
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
+
+ core_scsi3_put_pr_reg(pr_reg_n);
+ core_scsi3_pr_generation(cmd->se_dev);
+ return 0;
+}
+
+static sense_reason_t
+core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
+ u64 res_key, u64 sa_res_key, enum preempt_type preempt_type)
+{
+ switch (type) {
+ case PR_TYPE_WRITE_EXCLUSIVE:
+ case PR_TYPE_EXCLUSIVE_ACCESS:
+ case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+ case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+ case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+ case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+ return core_scsi3_pro_preempt(cmd, type, scope, res_key,
+ sa_res_key, preempt_type);
+ default:
+ pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
+ " Type: 0x%02x\n", (preempt_type == PREEMPT_AND_ABORT) ?
"_AND_ABORT" : "", type); + return TCM_INVALID_CDB_FIELD; + } +} + + +static sense_reason_t +core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, + u64 sa_res_key, int aptpl, int unreg) +{ + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_dev; + struct se_dev_entry *dest_se_deve = NULL; + struct se_lun *se_lun = cmd->se_lun, *tmp_lun; + struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; + struct se_portal_group *se_tpg, *dest_se_tpg = NULL; + const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; + struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; + struct t10_reservation *pr_tmpl = &dev->t10_pr; + unsigned char *buf; + const unsigned char *initiator_str; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { }; + u32 tid_len, tmp_tid_len; + int new_reg = 0, type, scope, matching_iname; + sense_reason_t ret; + unsigned short rtpi; + unsigned char proto_ident; + + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + se_tpg = se_sess->se_tpg; + tf_ops = se_tpg->se_tpg_tfo; + /* + * Follow logic from spc4r17 Section 5.7.8, Table 50 -- + * Register behaviors for a REGISTER AND MOVE service action + * + * Locate the existing *pr_reg via struct se_node_acl pointers + */ + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, + se_sess); + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" + " *pr_reg for REGISTER_AND_MOVE\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * The provided reservation key much match the existing reservation key + * provided during this initiator's I_T nexus registration. + */ + if (res_key != pr_reg->pr_res_key) { + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received" + " res_key: 0x%016Lx does not match existing SA REGISTER" + " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); + ret = TCM_RESERVATION_CONFLICT; + goto out_put_pr_reg; + } + /* + * The service active reservation key needs to be non zero + */ + if (!sa_res_key) { + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" + " sa_res_key\n"); + ret = TCM_INVALID_PARAMETER_LIST; + goto out_put_pr_reg; + } + + /* + * Determine the Relative Target Port Identifier where the reservation + * will be moved to for the TransportID containing SCSI initiator WWN + * information. 
+ */ + buf = transport_kmap_data_sg(cmd); + if (!buf) { + ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES; + goto out_put_pr_reg; + } + + rtpi = get_unaligned_be16(&buf[18]); + tid_len = get_unaligned_be32(&buf[20]); + transport_kunmap_data_sg(cmd); + buf = NULL; + + if ((tid_len + 24) != cmd->data_length) { + pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header" + " does not equal CDB data_length: %u\n", tid_len, + cmd->data_length); + ret = TCM_INVALID_PARAMETER_LIST; + goto out_put_pr_reg; + } + + spin_lock(&dev->se_port_lock); + list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) { + if (tmp_lun->lun_tpg->tpg_rtpi != rtpi) + continue; + dest_se_tpg = tmp_lun->lun_tpg; + dest_tf_ops = dest_se_tpg->se_tpg_tfo; + if (!dest_tf_ops) + continue; + + atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count); + spin_unlock(&dev->se_port_lock); + + if (core_scsi3_tpg_depend_item(dest_se_tpg)) { + pr_err("core_scsi3_tpg_depend_item() failed" + " for dest_se_tpg\n"); + atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count); + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out_put_pr_reg; + } + + spin_lock(&dev->se_port_lock); + break; + } + spin_unlock(&dev->se_port_lock); + + if (!dest_se_tpg || !dest_tf_ops) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + " fabric ops from Relative Target Port Identifier:" + " %hu\n", rtpi); + ret = TCM_INVALID_PARAMETER_LIST; + goto out_put_pr_reg; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) { + ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES; + goto out_put_pr_reg; + } + proto_ident = (buf[24] & 0x0f); + + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" + " 0x%02x\n", proto_ident); + + if (proto_ident != dest_se_tpg->proto_id) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" + " proto_ident: 0x%02x does not match ident: 0x%02x" + " from fabric: %s\n", proto_ident, + dest_se_tpg->proto_id, + dest_tf_ops->fabric_name); + ret = TCM_INVALID_PARAMETER_LIST; + goto out; + } + initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, + &buf[24], &tmp_tid_len, &iport_ptr); + if (!initiator_str) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + " initiator_str from Transport ID\n"); + ret = TCM_INVALID_PARAMETER_LIST; + goto out; + } + + transport_kunmap_data_sg(cmd); + buf = NULL; + + pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" + " %s\n", dest_tf_ops->fabric_name, (iport_ptr != NULL) ? + "port" : "device", initiator_str, (iport_ptr != NULL) ? + iport_ptr : ""); + /* + * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service + * action specifies a TransportID that is the same as the initiator port + * of the I_T nexus for the command received, then the command shall + * be terminated with CHECK CONDITION status, with the sense key set to + * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD + * IN PARAMETER LIST. + */ + pr_reg_nacl = pr_reg->pr_reg_nacl; + matching_iname = (!strcmp(initiator_str, + pr_reg_nacl->initiatorname)) ? 
1 : 0;
+ if (!matching_iname)
+ goto after_iport_check;
+
+ if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ " matches: %s on received I_T Nexus\n", initiator_str,
+ pr_reg_nacl->initiatorname);
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+ if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ " matches: %s %s on received I_T Nexus\n",
+ initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+ pr_reg->pr_reg_isid);
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+after_iport_check:
+ /*
+ * Locate the destination struct se_node_acl from the received Transport ID
+ */
+ mutex_lock(&dest_se_tpg->acl_node_mutex);
+ dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+ initiator_str);
+ if (dest_node_acl)
+ atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
+ mutex_unlock(&dest_se_tpg->acl_node_mutex);
+
+ if (!dest_node_acl) {
+ pr_err("Unable to locate %s dest_node_acl for"
+ " TransportID: %s\n", dest_tf_ops->fabric_name,
+ initiator_str);
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
+ pr_err("core_scsi3_nodeacl_depend_item() failed for"
+ " dest_node_acl\n");
+ atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
+ dest_node_acl = NULL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ " %s from TransportID\n", dest_tf_ops->fabric_name,
+ dest_node_acl->initiatorname);
+
+ /*
+ * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+ * PORT IDENTIFIER.
+ */
+ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve from RTPI:"
+ " %hu\n", dest_tf_ops->fabric_name, rtpi);
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out;
+ }
+
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
+ pr_err("core_scsi3_lunacl_depend_item() failed\n");
+ kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
+ dest_se_deve = NULL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ " ACL for dest_se_deve->mapped_lun: %llu\n",
+ dest_tf_ops->fabric_name, dest_node_acl->initiatorname,
+ dest_se_deve->mapped_lun);
+
+ /*
+ * A persistent reservation needs to already exist in order to
+ * successfully complete the REGISTER_AND_MOVE service action..
+ */
+ spin_lock(&dev->dev_reservation_lock);
+ pr_res_holder = dev->dev_pr_res_holder;
+ if (!pr_res_holder) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ " currently held\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out;
+ }
+ /*
+ * The I_T nexus the command was received on must be the reservation holder.
+ *
+ * From spc4r17 section 5.7.8 Table 50 --
+ * Register behaviors for a REGISTER AND MOVE service action
+ */
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ " Nexus is not reservation holder\n");
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out;
+ }
+ /*
+ * From spc4r17 section 5.7.8: registering and moving reservation
+ *
+ * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+ * action is received and the established persistent reservation is a
+ * Write Exclusive - All Registrants type or Exclusive Access -
+ * All Registrants type reservation, then the command shall be completed
+ * with RESERVATION CONFLICT status.
+ */
+ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ " reservation for type: %s\n",
+ core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+ spin_unlock(&dev->dev_reservation_lock);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out;
+ }
+ pr_res_nacl = pr_res_holder->pr_reg_nacl;
+ /*
+ * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+ */
+ type = pr_res_holder->pr_res_type;
+ scope = pr_res_holder->pr_res_scope;
+ /*
+ * c) Associate the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field with the I_T nexus specified as the
+ * destination of the register and move, where:
+ * A) The I_T nexus is specified by the TransportID and the
+ * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+ * B) Regardless of the TransportID format used, the association for
+ * the initiator port is based on either the initiator port name
+ * (see 3.1.71) on SCSI transport protocols where port names are
+ * required or the initiator port identifier (see 3.1.70) on SCSI
+ * transport protocols where port names are not required;
+ * d) Register the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field;
+ *
+ * Also, it is not an error for a REGISTER AND MOVE service action to
+ * register an I_T nexus that is already registered with the same
+ * reservation key or a different reservation key.
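+ *
+ * (Sketch of how c) and d) are realized below, in this file's own
+ * terms: if no registration exists yet for the destination I_T
+ * nexus, core_scsi3_alloc_registration() creates one keyed by
+ * sa_res_key; otherwise the existing registration is retained and
+ * only its pr_res_key is overwritten.)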
+ */
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ if (!dest_pr_reg) {
+ struct se_lun *dest_lun = dest_se_deve->se_lun;
+
+ spin_unlock(&dev->dev_reservation_lock);
+ if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
+ dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
+ iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+ goto out;
+ }
+ spin_lock(&dev->dev_reservation_lock);
+ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+ iport_ptr);
+ new_reg = 1;
+ } else {
+ /*
+ * e) Retain the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field and associated information;
+ */
+ dest_pr_reg->pr_res_key = sa_res_key;
+ }
+ /*
+ * f) Release the persistent reservation for the persistent reservation
+ * holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+ * command with REGISTER AND MOVE service action was received);
+ */
+ __core_scsi3_complete_pro_release(dev, pr_res_nacl,
+ dev->dev_pr_res_holder, 0, 0);
+ /*
+ * g) Move the persistent reservation to the specified I_T nexus using
+ * the same scope and type as the persistent reservation released in
+ * item f); and
+ */
+ dev->dev_pr_res_holder = dest_pr_reg;
+ dest_pr_reg->pr_res_holder = 1;
+ dest_pr_reg->pr_res_type = type;
+ dest_pr_reg->pr_res_scope = scope;
+ core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
+ /*
+ * Increment PRGeneration for existing registrations..
+ */
+ if (!new_reg)
+ dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+ spin_unlock(&dev->dev_reservation_lock);
+
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+ " created new reservation holder TYPE: %s on object RTPI:"
+ " %hu PRGeneration: 0x%08x\n", dest_tf_ops->fabric_name,
+ core_scsi3_pr_dump_type(type), rtpi,
+ dest_pr_reg->pr_res_generation);
+ pr_debug("SPC-3 PR Successfully moved reservation from"
+ " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+ tf_ops->fabric_name, pr_reg_nacl->initiatorname,
+ i_buf, dest_tf_ops->fabric_name,
+ dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+ iport_ptr : "");
+ /*
+ * It is now safe to release configfs group dependencies for destination
+ * of Transport ID Initiator Device/Port Identifier
+ */
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_se_tpg);
+ /*
+ * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+ * nexus on which PERSISTENT RESERVE OUT command was received.
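+ *
+ * (When UNREG is set, the calling pr_reg is freed under the
+ * registration_lock below instead of merely dropping its reference;
+ * APTPL metadata is rewritten in either case.)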
+ */
+ if (unreg) {
+ spin_lock(&pr_tmpl->registration_lock);
+ __core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+ spin_unlock(&pr_tmpl->registration_lock);
+ } else
+ core_scsi3_put_pr_reg(pr_reg);
+
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
+
+ core_scsi3_put_pr_reg(dest_pr_reg);
+ return 0;
+out:
+ if (buf)
+ transport_kunmap_data_sg(cmd);
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ if (dest_node_acl)
+ core_scsi3_nodeacl_undepend_item(dest_node_acl);
+ core_scsi3_tpg_undepend_item(dest_se_tpg);
+
+out_put_pr_reg:
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
+}
+
+static sense_reason_t
+target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key,
+ u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+
+ if (!cmd->se_sess || !cmd->se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (!ops->execute_pr_out) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ switch (sa) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (spec_i_pt || all_tg_pt) {
+ pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl);
+}
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+sense_reason_t
+target_scsi3_emulate_pr_out(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
+ unsigned char *buf;
+ u64 res_key, sa_res_key;
+ int sa, scope, type, aptpl;
+ int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+ sense_reason_t ret;
+
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has been reserved by any RESERVE command and is
+ * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+ * PERSISTENT RESERVE OUT commands shall conflict regardless of
+ * initiator or service action and shall terminate with a RESERVATION
+ * CONFLICT status.
+ */
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
+ pr_err("Received PERSISTENT_RESERVE CDB while legacy"
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ return TCM_RESERVATION_CONFLICT;
+ }
+
+ /*
+ * FIXME: A NULL struct se_session pointer means this is not coming from
+ * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+ */
+ if (!cmd->se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (cmd->data_length < 24) {
+ pr_warn("SPC-PR: Received PR OUT parameter list"
+ " length too small: %u\n", cmd->data_length);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
+ /*
+ * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+ */
+ sa = (cdb[1] & 0x1f);
+ scope = (cdb[2] & 0xf0);
+ type = (cdb[2] & 0x0f);
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ /*
+ * From PERSISTENT_RESERVE_OUT parameter list (payload)
+ */
+ res_key = get_unaligned_be64(&buf[0]);
+ sa_res_key = get_unaligned_be64(&buf[8]);
+ /*
+ * REGISTER_AND_MOVE uses a different SA parameter list containing
+ * SCSI TransportIDs.
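+ *
+ * Rough byte map of the PR OUT parameter data, as implied by the
+ * parsing below (not quoted from the standard): bytes 0-7 hold the
+ * RESERVATION KEY and bytes 8-15 the SERVICE ACTION RESERVATION KEY,
+ * both big-endian. For REGISTER_AND_MOVE the UNREG (0x02) and APTPL
+ * (0x01) flags live in byte 17; for all other service actions byte
+ * 20 carries SPEC_I_PT (0x08), ALL_TG_PT (0x04) and APTPL (0x01).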
+ */
+ if (sa != PRO_REGISTER_AND_MOVE) {
+ spec_i_pt = (buf[20] & 0x08);
+ all_tg_pt = (buf[20] & 0x04);
+ aptpl = (buf[20] & 0x01);
+ } else {
+ aptpl = (buf[17] & 0x01);
+ unreg = (buf[17] & 0x02);
+ }
+ /*
+ * If the backend device has been configured to force APTPL metadata
+ * write-out, go ahead and propagate aptpl=1 down now.
+ */
+ if (dev->dev_attrib.force_pr_aptpl)
+ aptpl = 1;
+
+ transport_kunmap_data_sg(cmd);
+ buf = NULL;
+
+ /*
+ * SPEC_I_PT=1 is only valid for Service action: REGISTER
+ */
+ if (spec_i_pt && (sa != PRO_REGISTER))
+ return TCM_INVALID_PARAMETER_LIST;
+
+ /*
+ * From spc4r17 section 6.14:
+ *
+ * If the SPEC_I_PT bit is set to zero, the service action is not
+ * REGISTER AND MOVE, and the parameter list length is not 24, then
+ * the command shall be terminated with CHECK CONDITION status, with
+ * the sense key set to ILLEGAL REQUEST, and the additional sense
+ * code set to PARAMETER LIST LENGTH ERROR.
+ */
+ if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) &&
+ (cmd->data_length != 24)) {
+ pr_warn("SPC-PR: Received PR OUT illegal parameter"
+ " list length: %u\n", cmd->data_length);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type,
+ aptpl, all_tg_pt, spec_i_pt);
+ goto done;
+ }
+
+ /*
+ * core_scsi3_emulate_pro_* function parameters
+ * are defined by spc4r17 Table 174:
+ * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+ */
+ switch (sa) {
+ case PRO_REGISTER:
+ ret = core_scsi3_emulate_pro_register(cmd,
+ res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER);
+ break;
+ case PRO_RESERVE:
+ ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
+ break;
+ case PRO_RELEASE:
+ ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
+ break;
+ case PRO_CLEAR:
+ ret = core_scsi3_emulate_pro_clear(cmd, res_key);
+ break;
+ case PRO_PREEMPT:
+ ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, PREEMPT);
+ break;
+ case PRO_PREEMPT_AND_ABORT:
+ ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
+ res_key, sa_res_key, PREEMPT_AND_ABORT);
+ break;
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ ret = core_scsi3_emulate_pro_register(cmd,
+ 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, REGISTER_AND_IGNORE_EXISTING_KEY);
+ break;
+ case PRO_REGISTER_AND_MOVE:
+ ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+ sa_res_key, aptpl, unreg);
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
+ " action: 0x%02x\n", sa);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+done:
+ if (!ret)
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return ret;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static sense_reason_t
+core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_pr_registration *pr_reg;
+ unsigned char *buf;
+ u32 add_len = 0, off = 8;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be32(dev->t10_pr.pr_generation, buf);
+
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
+ pr_reg_list) {
+ /*
+ * Check for overflow of 8-byte PRI READ_KEYS payload and
+ * next reservation key list descriptor.
+ */
+ if (off + 8 <= cmd->data_length) {
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
+ }
+ /*
+ * SPC5r17: 6.16.2 READ KEYS service action
+ * The ADDITIONAL LENGTH field indicates the number of bytes in
+ * the Reservation key list. The contents of the ADDITIONAL
+ * LENGTH field are not altered based on the allocation length
+ */
+ add_len += 8;
+ }
+ spin_unlock(&dev->t10_pr.registration_lock);
+
+ put_unaligned_be32(add_len, &buf[4]);
+ target_set_cmd_data_length(cmd, 8 + add_len);
+
+ transport_kunmap_data_sg(cmd);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static sense_reason_t
+core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_pr_registration *pr_reg;
+ unsigned char *buf;
+ u64 pr_res_key;
+ u32 add_len = 0;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
+ if (pr_reg) {
+ /*
+ * Set the Additional Length to 16 when a reservation is held
+ */
+ add_len = 16;
+ put_unaligned_be32(add_len, &buf[4]);
+
+ if (cmd->data_length < 22)
+ goto err;
+
+ /*
+ * Set the Reservation key.
+ *
+ * From spc4r17, section 5.7.10:
+ * A persistent reservation holder has its reservation key
+ * returned in the parameter data from a PERSISTENT
+ * RESERVE IN command with READ RESERVATION service action as
+ * follows:
+ * a) For a persistent reservation of the type Write Exclusive
+ * - All Registrants or Exclusive Access - All Registrants,
+ * the reservation key shall be set to zero; or
+ * b) For all other persistent reservation types, the
+ * reservation key shall be set to the registered
+ * reservation key for the I_T nexus that holds the
+ * persistent reservation.
+ */
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+ pr_res_key = 0;
+ else
+ pr_res_key = pr_reg->pr_res_key;
+
+ put_unaligned_be64(pr_res_key, &buf[8]);
+ /*
+ * Set the SCOPE and TYPE
+ */
+ buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ }
+
+ target_set_cmd_data_length(cmd, 8 + add_len);
+
+err:
+ spin_unlock(&dev->dev_reservation_lock);
+ transport_kunmap_data_sg(cmd);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static sense_reason_t
+core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ unsigned char *buf;
+ u16 len = 8; /* Hardcoded to 8. */
+
+ if (cmd->data_length < 6) {
+ pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ " %u too small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be16(len, &buf[0]);
+ buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+ buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
+ */
+ buf[3] |= 0x80;
+ /*
+ * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+ */
+ buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+ /*
+ * PTPL_A: Persistence across Target Power Loss Active bit
+ */
+ if (pr_tmpl->pr_aptpl_active)
+ buf[3] |= 0x01;
+ /*
+ * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+ */
+ buf[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
+ buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+ buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+ buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+ buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+ target_set_cmd_data_length(cmd, len);
+
+ transport_kunmap_data_sg(cmd);
+
+ return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static sense_reason_t
+core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_node_acl *se_nacl;
+ struct se_portal_group *se_tpg;
+ struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ unsigned char *buf;
+ u32 add_desc_len = 0, add_len = 0;
+ u32 off = 8; /* off into first Full Status descriptor */
+ int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+ int exp_desc_len, desc_len;
+ bool all_reg = false;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+ " too small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_pr_res_holder) {
+ struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+ if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+ all_reg = true;
+ pr_res_type = pr_holder->pr_res_type;
+ pr_res_scope = pr_holder->pr_res_scope;
+ }
+ }
+ spin_unlock(&dev->dev_reservation_lock);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+ &pr_tmpl->registration_list, pr_reg_list) {
+
+ se_nacl = pr_reg->pr_reg_nacl;
+ se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+ add_desc_len = 0;
+
+ atomic_inc_mb(&pr_reg->pr_res_holders);
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Determine expected length of $FABRIC_MOD specific
+ * TransportID full status descriptor..
+ */
+ exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg,
+ &format_code);
+ if (exp_desc_len < 0 ||
+ exp_desc_len + add_len > cmd->data_length) {
+ pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
+ " out of buffer: %d\n", cmd->data_length);
+ spin_lock(&pr_tmpl->registration_lock);
+ atomic_dec_mb(&pr_reg->pr_res_holders);
+ break;
+ }
+ /*
+ * Set RESERVATION KEY
+ */
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
+ off += 4; /* Skip Over Reserved area */
+
+ /*
+ * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
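+ *
+ * (Layout of each full status descriptor as assembled here,
+ * relative to its start: bytes 0-7 reservation key, 8-11
+ * reserved, byte 12 flags (ALL_TG_PT 0x02, PR_HOLDER 0x01),
+ * byte 13 SCOPE/TYPE, bytes 14-17 reserved, bytes 18-19
+ * RELATIVE TARGET PORT IDENTIFIER, bytes 20-23 ADDITIONAL
+ * DESCRIPTOR LENGTH, then the TransportID; hence
+ * add_desc_len = 24 + desc_len below.)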
+ */
+ if (pr_reg->pr_reg_all_tg_pt)
+ buf[off] = 0x02;
+ /*
+ * The struct se_lun pointer will be present for the
+ * reservation holder for PR_HOLDER bit.
+ *
+ * Also, if this registration is the reservation
+ * holder or there is an All Registrants reservation
+ * active, fill in SCOPE and TYPE in the next byte.
+ */
+ if (pr_reg->pr_res_holder) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+ (pr_reg->pr_res_type & 0x0f);
+ } else if (all_reg) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_res_scope & 0xf0) |
+ (pr_res_type & 0x0f);
+ } else {
+ off += 2;
+ }
+
+ off += 4; /* Skip over reserved area */
+ /*
+ * From spc4r17 6.3.15:
+ *
+ * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT
+ * IDENTIFIER field contains the relative port identifier (see
+ * 3.1.120) of the target port that is part of the I_T nexus
+ * described by this full status descriptor. If the ALL_TG_PT
+ * bit is set to one, the contents of the RELATIVE TARGET PORT
+ * IDENTIFIER field are not defined by this standard.
+ */
+ if (!pr_reg->pr_reg_all_tg_pt) {
+ u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
+
+ put_unaligned_be16(sep_rtpi, &buf[off]);
+ off += 2;
+ } else
+ off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+
+ buf[off+4] = se_tpg->proto_id;
+
+ /*
+ * Now, have the $FABRIC_MOD fill in the transport ID.
+ */
+ desc_len = target_get_pr_transport_id(se_nacl, pr_reg,
+ &format_code, &buf[off+4]);
+
+ spin_lock(&pr_tmpl->registration_lock);
+ atomic_dec_mb(&pr_reg->pr_res_holders);
+
+ if (desc_len < 0)
+ break;
+ /*
+ * Set the ADDITIONAL DESCRIPTOR LENGTH
+ */
+ put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
+ /*
+ * Size of the full descriptor header minus the TransportID
+ * (containing $FABRIC_MOD specific initiator device/port
+ * WWN information).
+ *
+ * See spc4r17 Section 6.13.5 Table 169
+ */
+ add_desc_len = (24 + desc_len);
+
+ off += desc_len;
+ add_len += add_desc_len;
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+ * Set ADDITIONAL_LENGTH
+ */
+ put_unaligned_be32(add_len, &buf[4]);
+ target_set_cmd_data_length(cmd, 8 + add_len);
+
+ transport_kunmap_data_sg(cmd);
+
+ return 0;
+}
+
+static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, u8 sa)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ unsigned char *buf;
+ sense_reason_t ret;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA SCSI Data Length: %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (!ops->execute_pr_in) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (sa == PRI_READ_FULL_STATUS) {
+ pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ ret = ops->execute_pr_in(cmd, sa, buf);
+
+ transport_kunmap_data_sg(cmd);
+ return ret;
+}
+
+sense_reason_t
+target_scsi3_emulate_pr_in(struct se_cmd *cmd)
+{
+ u8 sa = cmd->t_task_cdb[1] & 0x1f;
+ sense_reason_t ret;
+
+ /*
+ * Following spc2r20 5.5.1 Reservations overview:
+ *
+ * If a logical unit has been reserved by any RESERVE command and is
+ * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+ * PERSISTENT RESERVE OUT commands shall conflict regardless of
+ * initiator or service action and shall terminate with a RESERVATION
+ * CONFLICT status.
+ */ + if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) { + pr_err("Received PERSISTENT_RESERVE CDB while legacy" + " SPC-2 reservation is held, returning" + " RESERVATION_CONFLICT\n"); + return TCM_RESERVATION_CONFLICT; + } + + if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) { + ret = target_try_pr_in_pt(cmd, sa); + goto done; + } + + switch (sa) { + case PRI_READ_KEYS: + ret = core_scsi3_pri_read_keys(cmd); + break; + case PRI_READ_RESERVATION: + ret = core_scsi3_pri_read_reservation(cmd); + break; + case PRI_REPORT_CAPABILITIES: + ret = core_scsi3_pri_report_capabilities(cmd); + break; + case PRI_READ_FULL_STATUS: + ret = core_scsi3_pri_read_full_status(cmd); + break; + default: + pr_err("Unknown PERSISTENT_RESERVE_IN service" + " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); + return TCM_INVALID_CDB_FIELD; + } + +done: + if (!ret) + target_complete_cmd(cmd, SAM_STAT_GOOD); + return ret; +} + +sense_reason_t +target_check_reservation(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + sense_reason_t ret; + + if (!cmd->se_sess) + return 0; + if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) + return 0; + if (!dev->dev_attrib.emulate_pr) + return 0; + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) + return 0; + + spin_lock(&dev->dev_reservation_lock); + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) + ret = target_scsi2_reservation_check(cmd); + else + ret = target_scsi3_pr_reservation_check(cmd); + spin_unlock(&dev->dev_reservation_lock); + + return ret; +} diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h new file mode 100644 index 0000000000..b793c99637 --- /dev/null +++ b/drivers/target/target_core_pr.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_PR_H +#define TARGET_CORE_PR_H + +#include <linux/types.h> +#include <target/target_core_base.h> + +/* + * PERSISTENT_RESERVE_OUT service action codes + * + * spc5r04b section 6.15.2 Table 174 + */ +#define PRO_REGISTER 0x00 +#define PRO_RESERVE 0x01 +#define PRO_RELEASE 0x02 +#define PRO_CLEAR 0x03 +#define PRO_PREEMPT 0x04 +#define PRO_PREEMPT_AND_ABORT 0x05 +#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06 +#define PRO_REGISTER_AND_MOVE 0x07 +#define PRO_REPLACE_LOST_RESERVATION 0x08 +/* + * PERSISTENT_RESERVE_IN service action codes + * + * spc5r04b section 6.14.1 Table 162 + */ +#define PRI_READ_KEYS 0x00 +#define PRI_READ_RESERVATION 0x01 +#define PRI_REPORT_CAPABILITIES 0x02 +#define PRI_READ_FULL_STATUS 0x03 +/* + * PERSISTENT_RESERVE_ SCOPE field + * + * spc5r04b section 6.14.3.2 Table 166 + */ +#define PR_SCOPE_LU_SCOPE 0x00 +/* + * PERSISTENT_RESERVE_* TYPE field + * + * spc5r04b section 6.14.3.3 Table 167 + */ +#define PR_TYPE_WRITE_EXCLUSIVE 0x01 +#define PR_TYPE_EXCLUSIVE_ACCESS 0x03 +#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05 +#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06 +#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07 +#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08 + +#define PR_APTPL_MAX_IPORT_LEN 256 +#define PR_APTPL_MAX_TPORT_LEN 256 + +/* + * Function defined in target_core_spc.c + */ +void spc_gen_naa_6h_vendor_specific(struct se_device *, unsigned char *); + +extern struct kmem_cache *t10_pr_reg_cache; + +extern void core_pr_dump_initiator_port(struct t10_pr_registration *, + char *, u32); +extern void target_release_reservation(struct se_device *dev); +extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *); +extern sense_reason_t target_scsi2_reservation_reserve(struct 
se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+ struct t10_reservation *, u64,
+ unsigned char *, unsigned char *, u64,
+ unsigned char *, u16, u64, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+ struct se_portal_group *, struct se_lun *,
+ struct se_node_acl *, u64);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+ struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 0000000000..0d4f09693e
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ * Filename: target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * (c) Copyright 2003-2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_alua.h"
+#include "target_core_internal.h"
+#include "target_core_pscsi.h"
+
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct pscsi_dev_virt, dev);
+}
+
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
+
+/* pscsi_attach_hba():
+ *
+ * pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host
+ * from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct pscsi_hba_virt *phv;
+
+ phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+ if (!phv) {
+ pr_err("Unable to allocate struct pscsi_hba_virt\n");
+ return -ENOMEM;
+ }
+ phv->phv_host_id = host_id;
+ phv->phv_mode = PHV_VIRTUAL_HOST_ID;
+
+ hba->hba_ptr = phv;
+
+ pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+ " Generic Target Core Stack %s\n", hba->hba_id,
+ PSCSI_VERSION, TARGET_CORE_VERSION);
+ pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+ hba->hba_id);
+
+ return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+ if (scsi_host) {
+ scsi_host_put(scsi_host);
+
+ pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
+ " Generic Target Core\n", hba->hba_id,
+ (scsi_host->hostt->name) ?
(scsi_host->hostt->name) : + "Unknown"); + } else + pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" + " from Generic Target Core\n", hba->hba_id); + + kfree(phv); + hba->hba_ptr = NULL; +} + +static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) +{ + struct pscsi_hba_virt *phv = hba->hba_ptr; + struct Scsi_Host *sh = phv->phv_lld_host; + /* + * Release the struct Scsi_Host + */ + if (!mode_flag) { + if (!sh) + return 0; + + phv->phv_lld_host = NULL; + phv->phv_mode = PHV_VIRTUAL_HOST_ID; + + pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" + " %s\n", hba->hba_id, (sh->hostt->name) ? + (sh->hostt->name) : "Unknown"); + + scsi_host_put(sh); + return 0; + } + /* + * Otherwise, locate struct Scsi_Host from the original passed + * pSCSI Host ID and enable for phba mode + */ + sh = scsi_host_lookup(phv->phv_host_id); + if (!sh) { + pr_err("pSCSI: Unable to locate SCSI Host for" + " phv_host_id: %d\n", phv->phv_host_id); + return -EINVAL; + } + + phv->phv_lld_host = sh; + phv->phv_mode = PHV_LLD_SCSI_HOST_NO; + + pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", + hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); + + return 1; +} + +static void pscsi_tape_read_blocksize(struct se_device *dev, + struct scsi_device *sdev) +{ + unsigned char cdb[MAX_COMMAND_SIZE], *buf; + int ret; + + buf = kzalloc(12, GFP_KERNEL); + if (!buf) + goto out_free; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = MODE_SENSE; + cdb[4] = 0x0c; /* 12 bytes */ + + ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL); + if (ret) + goto out_free; + + /* + * If MODE_SENSE still returns zero, set the default value to 1024. + */ + sdev->sector_size = get_unaligned_be24(&buf[9]); +out_free: + if (!sdev->sector_size) + sdev->sector_size = 1024; + + kfree(buf); +} + +static void +pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) +{ + if (sdev->inquiry_len < INQUIRY_LEN) + return; + /* + * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun() + */ + BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1); + snprintf(wwn->vendor, sizeof(wwn->vendor), + "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor); + BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1); + snprintf(wwn->model, sizeof(wwn->model), + "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model); + BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1); + snprintf(wwn->revision, sizeof(wwn->revision), + "%." 
__stringify(INQUIRY_REVISION_LEN) "s", sdev->rev); +} + +static int +pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) +{ + unsigned char cdb[MAX_COMMAND_SIZE], *buf; + int ret; + + buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = INQUIRY; + cdb[1] = 0x01; /* Query VPD */ + cdb[2] = 0x80; /* Unit Serial Number */ + put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]); + + ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, + INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL); + if (ret) + goto out_free; + + snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); + + wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL; + + kfree(buf); + return 0; + +out_free: + kfree(buf); + return -EPERM; +} + +static void +pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, + struct t10_wwn *wwn) +{ + unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; + int ident_len, page_len, off = 4, ret; + struct t10_vpd *vpd; + + buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); + if (!buf) + return; + + memset(cdb, 0, MAX_COMMAND_SIZE); + cdb[0] = INQUIRY; + cdb[1] = 0x01; /* Query VPD */ + cdb[2] = 0x83; /* Device Identifier */ + put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]); + + ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, + INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL); + if (ret) + goto out; + + page_len = get_unaligned_be16(&buf[2]); + while (page_len > 0) { + /* Grab a pointer to the Identification descriptor */ + page_83 = &buf[off]; + ident_len = page_83[3]; + if (!ident_len) { + pr_err("page_83[3]: identifier" + " length zero!\n"); + break; + } + pr_debug("T10 VPD Identifier Length: %d\n", ident_len); + + vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); + if (!vpd) { + pr_err("Unable to allocate memory for" + " struct t10_vpd\n"); + goto out; + } + INIT_LIST_HEAD(&vpd->vpd_list); + + transport_set_vpd_proto_id(vpd, page_83); + transport_set_vpd_assoc(vpd, page_83); + + if (transport_set_vpd_ident_type(vpd, page_83) < 0) { + off += (ident_len + 4); + page_len -= (ident_len + 4); + kfree(vpd); + continue; + } + if (transport_set_vpd_ident(vpd, page_83) < 0) { + off += (ident_len + 4); + page_len -= (ident_len + 4); + kfree(vpd); + continue; + } + + list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); + off += (ident_len + 4); + page_len -= (ident_len + 4); + } + +out: + kfree(buf); +} + +static int pscsi_add_device_to_list(struct se_device *dev, + struct scsi_device *sd) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct request_queue *q = sd->request_queue; + + pdv->pdv_sd = sd; + + if (!sd->queue_depth) { + sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; + + pr_err("Set broken SCSI Device %d:%d:%llu" + " queue_depth to %d\n", sd->channel, sd->id, + sd->lun, sd->queue_depth); + } + + dev->dev_attrib.hw_block_size = + min_not_zero((int)sd->sector_size, 512); + dev->dev_attrib.hw_max_sectors = + min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); + dev->dev_attrib.hw_queue_depth = sd->queue_depth; + + /* + * Setup our standard INQUIRY info into se_dev->t10_wwn + */ + pscsi_set_inquiry_info(sd, &dev->t10_wwn); + + /* + * Locate VPD WWN Information used for various purposes within + * the Storage Engine. + */ + if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) { + /* + * If VPD Unit Serial returned GOOD status, try + * VPD Device Identification page (0x83). 
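+ *
+ * (Both VPD probes above and below are best-effort: scsi_execute_cmd()
+ * runs with a one-second (HZ) timeout and a single retry, and on
+ * failure the emulated WWN defaults are simply left in place.)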
+ */
+ pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
+ }
+
+ /*
+ * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+ */
+ if (sd->type == TYPE_TAPE) {
+ pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
+ return 0;
+}
+
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+ const char *name)
+{
+ struct pscsi_dev_virt *pdv;
+
+ pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+ if (!pdv) {
+ pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
+ return NULL;
+ }
+
+ pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+ return &pdv->dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+ __releases(sh->host_lock)
+{
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct Scsi_Host *sh = sd->host;
+ struct block_device *bd;
+ int ret;
+
+ if (scsi_device_get(sd)) {
+ pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
+ sh->host_no, sd->channel, sd->id, sd->lun);
+ spin_unlock_irq(sh->host_lock);
+ return -EIO;
+ }
+ spin_unlock_irq(sh->host_lock);
+ /*
+ * Claim exclusive struct block_device access to struct scsi_device
+ * for TYPE_DISK and TYPE_ZBC using supplied udev_path
+ */
+ bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ,
+ pdv, NULL);
+ if (IS_ERR(bd)) {
+ pr_err("pSCSI: blkdev_get_by_path() failed\n");
+ scsi_device_put(sd);
+ return PTR_ERR(bd);
+ }
+ pdv->pdv_bd = bd;
+
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
+ blkdev_put(pdv->pdv_bd, pdv);
+ scsi_device_put(sd);
+ return ret;
+ }
+
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
+ phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
+ sh->host_no, sd->channel, sd->id, sd->lun);
+ return 0;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
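+ *
+ * (The lock is dropped with spin_unlock_irq() before this function
+ * returns, matching the __releases(sh->host_lock) annotation.)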
+ */ +static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) + __releases(sh->host_lock) +{ + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; + struct Scsi_Host *sh = sd->host; + int ret; + + if (scsi_device_get(sd)) { + pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n", + sh->host_no, sd->channel, sd->id, sd->lun); + spin_unlock_irq(sh->host_lock); + return -EIO; + } + spin_unlock_irq(sh->host_lock); + + ret = pscsi_add_device_to_list(dev, sd); + if (ret) { + scsi_device_put(sd); + return ret; + } + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", + phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, + sd->channel, sd->id, sd->lun); + + return 0; +} + +static int pscsi_configure_device(struct se_device *dev) +{ + struct se_hba *hba = dev->se_hba; + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct scsi_device *sd; + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; + struct Scsi_Host *sh = phv->phv_lld_host; + int legacy_mode_enable = 0; + int ret; + + if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || + !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || + !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { + pr_err("Missing scsi_channel_id=, scsi_target_id= and" + " scsi_lun_id= parameters\n"); + return -EINVAL; + } + + /* + * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the + * struct Scsi_Host we will need to bring the TCM/pSCSI object online + */ + if (!sh) { + if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { + pr_err("pSCSI: Unable to locate struct" + " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); + return -ENODEV; + } + /* + * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device + * reference, we enforce that udev_path has been set + */ + if (!(dev->dev_flags & DF_USING_UDEV_PATH)) { + pr_err("pSCSI: udev_path attribute has not" + " been set before ENABLE=1\n"); + return -EINVAL; + } + /* + * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID, + * use the original TCM hba ID to reference Linux/SCSI Host No + * and enable for PHV_LLD_SCSI_HOST_NO mode. + */ + if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { + if (hba->dev_count) { + pr_err("pSCSI: Unable to set hba_mode" + " with active devices\n"); + return -EEXIST; + } + + if (pscsi_pmode_enable_hba(hba, 1) != 1) + return -ENODEV; + + legacy_mode_enable = 1; + hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; + sh = phv->phv_lld_host; + } else { + sh = scsi_host_lookup(pdv->pdv_host_id); + if (!sh) { + pr_err("pSCSI: Unable to locate" + " pdv_host_id: %d\n", pdv->pdv_host_id); + return -EINVAL; + } + pdv->pdv_lld_host = sh; + } + } else { + if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { + pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while" + " struct Scsi_Host exists\n"); + return -EEXIST; + } + } + + spin_lock_irq(sh->host_lock); + list_for_each_entry(sd, &sh->__devices, siblings) { + if ((pdv->pdv_channel_id != sd->channel) || + (pdv->pdv_target_id != sd->id) || + (pdv->pdv_lun_id != sd->lun)) + continue; + /* + * Functions will release the held struct scsi_host->host_lock + * before calling pscsi_add_device_to_list() to register + * struct scsi_device with target_core_mod. 
+ */ + switch (sd->type) { + case TYPE_DISK: + case TYPE_ZBC: + ret = pscsi_create_type_disk(dev, sd); + break; + default: + ret = pscsi_create_type_nondisk(dev, sd); + break; + } + + if (ret) { + if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) + scsi_host_put(sh); + else if (legacy_mode_enable) { + pscsi_pmode_enable_hba(hba, 0); + hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; + } + pdv->pdv_sd = NULL; + return ret; + } + return 0; + } + spin_unlock_irq(sh->host_lock); + + pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, + pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); + + if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) + scsi_host_put(sh); + else if (legacy_mode_enable) { + pscsi_pmode_enable_hba(hba, 0); + hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; + } + + return -ENODEV; +} + +static void pscsi_dev_call_rcu(struct rcu_head *p) +{ + struct se_device *dev = container_of(p, struct se_device, rcu_head); + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + + kfree(pdv); +} + +static void pscsi_free_device(struct se_device *dev) +{ + call_rcu(&dev->rcu_head, pscsi_dev_call_rcu); +} + +static void pscsi_destroy_device(struct se_device *dev) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; + struct scsi_device *sd = pdv->pdv_sd; + + if (sd) { + /* + * Release exclusive pSCSI internal struct block_device claim for + * struct scsi_device with TYPE_DISK or TYPE_ZBC + * from pscsi_create_type_disk() + */ + if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) && + pdv->pdv_bd) { + blkdev_put(pdv->pdv_bd, pdv); + pdv->pdv_bd = NULL; + } + /* + * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference + * to struct Scsi_Host now. + */ + if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && + (phv->phv_lld_host != NULL)) + scsi_host_put(phv->phv_lld_host); + else if (pdv->pdv_lld_host) + scsi_host_put(pdv->pdv_lld_host); + + scsi_device_put(sd); + + pdv->pdv_sd = NULL; + } +} + +static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, + unsigned char *req_sense, int valid_data) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); + struct scsi_device *sd = pdv->pdv_sd; + unsigned char *cdb = cmd->priv; + + /* + * Special case for REPORT_LUNs which is emulated and not passed on. + */ + if (!cdb) + return; + + /* + * Hack to make sure that Write-Protect modepage is set if R/O mode is + * forced. + */ + if (!cmd->data_length) + goto after_mode_sense; + + if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && + scsi_status == SAM_STAT_GOOD) { + bool read_only = target_lun_is_rdonly(cmd); + + if (read_only) { + unsigned char *buf; + + buf = transport_kmap_data_sg(cmd); + if (!buf) { + ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */ + } else { + if (cdb[0] == MODE_SENSE_10) { + if (!(buf[3] & 0x80)) + buf[3] |= 0x80; + } else { + if (!(buf[2] & 0x80)) + buf[2] |= 0x80; + } + + transport_kunmap_data_sg(cmd); + } + } + } +after_mode_sense: + + if (sd->type != TYPE_TAPE || !cmd->data_length) + goto after_mode_select; + + /* + * Hack to correctly obtain the initiator requested blocksize for + * TYPE_TAPE. Since this value is dependent upon each tape media, + * struct scsi_device->sector_size will not contain the correct value + * by default, so we go ahead and set it so + * TRANSPORT(dev)->get_blockdev() returns the correct value to the + * storage engine. 
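+ *
+ * Worked example, assuming a MODE SELECT(6) payload: the 4-byte mode
+ * parameter header is followed by an 8-byte block descriptor whose
+ * last three bytes carry the block length, i.e. buffer bytes 9..11 --
+ * exactly what get_unaligned_be24(&buf[9]) below extracts. For
+ * MODE SELECT(10) the header is 8 bytes, putting the block length at
+ * bytes 13..15.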
+ */ + if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && + scsi_status == SAM_STAT_GOOD) { + unsigned char *buf; + u16 bdl; + u32 blocksize; + + buf = sg_virt(&cmd->t_data_sg[0]); + if (!buf) { + pr_err("Unable to get buf for scatterlist\n"); + goto after_mode_select; + } + + if (cdb[0] == MODE_SELECT) + bdl = buf[3]; + else + bdl = get_unaligned_be16(&buf[6]); + + if (!bdl) + goto after_mode_select; + + if (cdb[0] == MODE_SELECT) + blocksize = get_unaligned_be24(&buf[9]); + else + blocksize = get_unaligned_be24(&buf[13]); + + sd->sector_size = blocksize; + } +after_mode_select: + + if (scsi_status == SAM_STAT_CHECK_CONDITION) { + transport_copy_sense_to_cmd(cmd, req_sense); + + /* + * check for TAPE device reads with + * FM/EOM/ILI set, so that we can get data + * back despite framework assumption that a + * check condition means there is no data + */ + if (sd->type == TYPE_TAPE && valid_data && + cmd->data_direction == DMA_FROM_DEVICE) { + /* + * is sense data valid, fixed format, + * and have FM, EOM, or ILI set? + */ + if (req_sense[0] == 0xf0 && /* valid, fixed format */ + req_sense[2] & 0xe0 && /* FM, EOM, or ILI */ + (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */ + pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n"); + cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; + } + } + } +} + +enum { + Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, + Opt_scsi_lun_id, Opt_err +}; + +static match_table_t tokens = { + {Opt_scsi_host_id, "scsi_host_id=%d"}, + {Opt_scsi_channel_id, "scsi_channel_id=%d"}, + {Opt_scsi_target_id, "scsi_target_id=%d"}, + {Opt_scsi_lun_id, "scsi_lun_id=%d"}, + {Opt_err, NULL} +}; + +static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev, + const char *page, ssize_t count) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_scsi_host_id: + if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { + pr_err("PSCSI[%d]: Unable to accept" + " scsi_host_id while phv_mode ==" + " PHV_LLD_SCSI_HOST_NO\n", + phv->phv_host_id); + ret = -EINVAL; + goto out; + } + ret = match_int(args, &arg); + if (ret) + goto out; + pdv->pdv_host_id = arg; + pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" + " %d\n", phv->phv_host_id, pdv->pdv_host_id); + pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; + break; + case Opt_scsi_channel_id: + ret = match_int(args, &arg); + if (ret) + goto out; + pdv->pdv_channel_id = arg; + pr_debug("PSCSI[%d]: Referencing SCSI Channel" + " ID: %d\n", phv->phv_host_id, + pdv->pdv_channel_id); + pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; + break; + case Opt_scsi_target_id: + ret = match_int(args, &arg); + if (ret) + goto out; + pdv->pdv_target_id = arg; + pr_debug("PSCSI[%d]: Referencing SCSI Target" + " ID: %d\n", phv->phv_host_id, + pdv->pdv_target_id); + pdv->pdv_flags |= PDF_HAS_TARGET_ID; + break; + case Opt_scsi_lun_id: + ret = match_int(args, &arg); + if (ret) + goto out; + pdv->pdv_lun_id = arg; + pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" + " %d\n", phv->phv_host_id, pdv->pdv_lun_id); + pdv->pdv_flags |= PDF_HAS_LUN_ID; + break; + default: + break; + } + } + +out: + kfree(orig); + return (!ret) ? 
count : ret; +} + +static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b) +{ + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct scsi_device *sd = pdv->pdv_sd; + unsigned char host_id[16]; + ssize_t bl; + + if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) + snprintf(host_id, 16, "%d", pdv->pdv_host_id); + else + snprintf(host_id, 16, "PHBA Mode"); + + bl = sprintf(b, "SCSI Device Bus Location:" + " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", + pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, + host_id); + + if (sd) { + bl += sprintf(b + bl, " Vendor: %." + __stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor); + bl += sprintf(b + bl, " Model: %." + __stringify(INQUIRY_MODEL_LEN) "s", sd->model); + bl += sprintf(b + bl, " Rev: %." + __stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev); + } + return bl; +} + +static void pscsi_bi_endio(struct bio *bio) +{ + bio_uninit(bio); + kfree(bio); +} + +static sense_reason_t +pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + struct request *req) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); + struct bio *bio = NULL; + struct page *page; + struct scatterlist *sg; + u32 data_len = cmd->data_length, i, len, bytes, off; + int nr_pages = (cmd->data_length + sgl[0].offset + + PAGE_SIZE - 1) >> PAGE_SHIFT; + int nr_vecs = 0, rc; + int rw = (cmd->data_direction == DMA_TO_DEVICE); + + BUG_ON(!cmd->data_length); + + pr_debug("PSCSI: nr_pages: %d\n", nr_pages); + + for_each_sg(sgl, sg, sgl_nents, i) { + page = sg_page(sg); + off = sg->offset; + len = sg->length; + + pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, + page, len, off); + + /* + * We only have one page of data in each sg element, + * we can not cross a page boundary. + */ + if (off + len > PAGE_SIZE) + goto fail; + + if (len > 0 && data_len > 0) { + bytes = min_t(unsigned int, len, PAGE_SIZE - off); + bytes = min(bytes, data_len); + + if (!bio) { +new_bio: + nr_vecs = bio_max_segs(nr_pages); + bio = bio_kmalloc(nr_vecs, GFP_KERNEL); + if (!bio) + goto fail; + bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, + rw ? REQ_OP_WRITE : REQ_OP_READ); + bio->bi_end_io = pscsi_bi_endio; + + pr_debug("PSCSI: Allocated bio: %p," + " dir: %s nr_vecs: %d\n", bio, + (rw) ? 
"rw" : "r", nr_vecs); + } + + pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" + " bio: %p page: %p len: %d off: %d\n", i, bio, + page, len, off); + + rc = bio_add_pc_page(pdv->pdv_sd->request_queue, + bio, page, bytes, off); + pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", + bio_segments(bio), nr_vecs); + if (rc != bytes) { + pr_debug("PSCSI: Reached bio->bi_vcnt max:" + " %d i: %d bio: %p, allocating another" + " bio\n", bio->bi_vcnt, i, bio); + + rc = blk_rq_append_bio(req, bio); + if (rc) { + pr_err("pSCSI: failed to append bio\n"); + goto fail; + } + + goto new_bio; + } + + data_len -= bytes; + } + } + + if (bio) { + rc = blk_rq_append_bio(req, bio); + if (rc) { + pr_err("pSCSI: failed to append bio\n"); + goto fail; + } + } + + return 0; +fail: + if (bio) + bio_put(bio); + while (req->bio) { + bio = req->bio; + req->bio = bio->bi_next; + bio_put(bio); + } + req->biotail = NULL; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +} + +static sense_reason_t +pscsi_parse_cdb(struct se_cmd *cmd) +{ + if (cmd->se_cmd_flags & SCF_BIDI) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + return passthrough_parse_cdb(cmd, pscsi_execute_cmd); +} + +static sense_reason_t +pscsi_execute_cmd(struct se_cmd *cmd) +{ + struct scatterlist *sgl = cmd->t_data_sg; + u32 sgl_nents = cmd->t_data_nents; + struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); + struct scsi_cmnd *scmd; + struct request *req; + sense_reason_t ret; + + req = scsi_alloc_request(pdv->pdv_sd->request_queue, + cmd->data_direction == DMA_TO_DEVICE ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(req)) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + if (sgl) { + ret = pscsi_map_sg(cmd, sgl, sgl_nents, req); + if (ret) + goto fail_put_request; + } + + req->end_io = pscsi_req_done; + req->end_io_data = cmd; + + scmd = blk_mq_rq_to_pdu(req); + scmd->cmd_len = scsi_command_size(cmd->t_task_cdb); + if (scmd->cmd_len > sizeof(scmd->cmnd)) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto fail_put_request; + } + memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len); + + if (pdv->pdv_sd->type == TYPE_DISK || + pdv->pdv_sd->type == TYPE_ZBC) + req->timeout = PS_TIMEOUT_DISK; + else + req->timeout = PS_TIMEOUT_OTHER; + scmd->allowed = PS_RETRY; + + cmd->priv = scmd->cmnd; + + blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG); + + return 0; + +fail_put_request: + blk_mq_free_request(req); + return ret; +} + +/* pscsi_get_device_type(): + * + * + */ +static u32 pscsi_get_device_type(struct se_device *dev) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + struct scsi_device *sd = pdv->pdv_sd; + + return (sd) ? 
sd->type : TYPE_NO_LUN; +} + +static sector_t pscsi_get_blocks(struct se_device *dev) +{ + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); + + if (pdv->pdv_bd) + return bdev_nr_sectors(pdv->pdv_bd); + return 0; +} + +static enum rq_end_io_ret pscsi_req_done(struct request *req, + blk_status_t status) +{ + struct se_cmd *cmd = req->end_io_data; + struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); + enum sam_status scsi_status = scmd->result & 0xff; + int valid_data = cmd->data_length - scmd->resid_len; + u8 *cdb = cmd->priv; + + if (scsi_status != SAM_STAT_GOOD) { + pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" + " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result); + } + + pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data); + + switch (host_byte(scmd->result)) { + case DID_OK: + target_complete_cmd_with_length(cmd, scsi_status, valid_data); + break; + default: + pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" + " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result); + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); + break; + } + + blk_mq_free_request(req); + return RQ_END_IO_NONE; +} + +static const struct target_backend_ops pscsi_ops = { + .name = "pscsi", + .owner = THIS_MODULE, + .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH | + TRANSPORT_FLAG_PASSTHROUGH_ALUA | + TRANSPORT_FLAG_PASSTHROUGH_PGR, + .attach_hba = pscsi_attach_hba, + .detach_hba = pscsi_detach_hba, + .pmode_enable_hba = pscsi_pmode_enable_hba, + .alloc_device = pscsi_alloc_device, + .configure_device = pscsi_configure_device, + .destroy_device = pscsi_destroy_device, + .free_device = pscsi_free_device, + .parse_cdb = pscsi_parse_cdb, + .set_configfs_dev_params = pscsi_set_configfs_dev_params, + .show_configfs_dev_params = pscsi_show_configfs_dev_params, + .get_device_type = pscsi_get_device_type, + .get_blocks = pscsi_get_blocks, + .tb_dev_attrib_attrs = passthrough_attrib_attrs, +}; + +static int __init pscsi_module_init(void) +{ + return transport_backend_register(&pscsi_ops); +} + +static void __exit pscsi_module_exit(void) +{ + target_backend_unregister(&pscsi_ops); +} + +MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(pscsi_module_init); +module_exit(pscsi_module_exit); diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h new file mode 100644 index 0000000000..23d9a6e340 --- /dev/null +++ b/drivers/target/target_core_pscsi.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_PSCSI_H +#define TARGET_CORE_PSCSI_H + +#define PSCSI_VERSION "v4.0" + +/* used in pscsi_find_alloc_len() */ +#ifndef INQUIRY_DATA_SIZE +#define INQUIRY_DATA_SIZE 0x24 +#endif + +/* used in pscsi_add_device_to_list() */ +#define PSCSI_DEFAULT_QUEUEDEPTH 1 + +#define PS_RETRY 5 +#define PS_TIMEOUT_DISK (15*HZ) +#define PS_TIMEOUT_OTHER (500*HZ) + +#include <linux/cache.h> /* ___cacheline_aligned */ +#include <target/target_core_base.h> /* struct se_device */ + +struct block_device; +struct scsi_device; +struct Scsi_Host; + +#define PDF_HAS_CHANNEL_ID 0x01 +#define PDF_HAS_TARGET_ID 0x02 +#define PDF_HAS_LUN_ID 0x04 +#define PDF_HAS_VPD_UNIT_SERIAL 0x08 +#define PDF_HAS_VPD_DEV_IDENT 0x10 +#define PDF_HAS_VIRT_HOST_ID 0x20 + +struct pscsi_dev_virt { + struct se_device dev; + int pdv_flags; + int pdv_host_id; + int pdv_channel_id; + int pdv_target_id; + int pdv_lun_id; + struct block_device *pdv_bd; + struct scsi_device *pdv_sd; + struct Scsi_Host *pdv_lld_host; +} 
____cacheline_aligned; + +typedef enum phv_modes { + PHV_VIRTUAL_HOST_ID, + PHV_LLD_SCSI_HOST_NO +} phv_modes_t; + +struct pscsi_hba_virt { + int phv_host_id; + phv_modes_t phv_mode; + struct Scsi_Host *phv_lld_host; +} ____cacheline_aligned; + +#endif /*** TARGET_CORE_PSCSI_H ***/ diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c new file mode 100644 index 0000000000..6f67cc09c2 --- /dev/null +++ b/drivers/target/target_core_rd.c @@ -0,0 +1,684 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_rd.c + * + * This file contains the Storage Engine <-> Ramdisk transport + * specific functions. + * + * (c) Copyright 2003-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/string.h> +#include <linux/parser.h> +#include <linux/highmem.h> +#include <linux/timer.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <scsi/scsi_proto.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> + +#include "target_core_rd.h" + +static inline struct rd_dev *RD_DEV(struct se_device *dev) +{ + return container_of(dev, struct rd_dev, dev); +} + +static int rd_attach_hba(struct se_hba *hba, u32 host_id) +{ + struct rd_host *rd_host; + + rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL); + if (!rd_host) + return -ENOMEM; + + rd_host->rd_host_id = host_id; + + hba->hba_ptr = rd_host; + + pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" + " Generic Target Core Stack %s\n", hba->hba_id, + RD_HBA_VERSION, TARGET_CORE_VERSION); + + return 0; +} + +static void rd_detach_hba(struct se_hba *hba) +{ + struct rd_host *rd_host = hba->hba_ptr; + + pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from" + " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); + + kfree(rd_host); + hba->hba_ptr = NULL; +} + +static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 sg_table_count) +{ + struct page *pg; + struct scatterlist *sg; + u32 i, j, page_count = 0, sg_per_table; + + for (i = 0; i < sg_table_count; i++) { + sg = sg_table[i].sg_table; + sg_per_table = sg_table[i].rd_sg_count; + + for (j = 0; j < sg_per_table; j++) { + pg = sg_page(&sg[j]); + if (pg) { + __free_page(pg); + page_count++; + } + } + kfree(sg); + } + + kfree(sg_table); + return page_count; +} + +static void rd_release_device_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, + rd_dev->sg_table_count); + + pr_debug("CORE_RD[%u] - Released device space for Ramdisk" + " Device ID: %u, pages %u in %u tables total bytes %lu\n", + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + + rd_dev->sg_table_array = NULL; + rd_dev->sg_table_count = 0; +} + + +/* rd_build_device_space(): + * + * + */ +static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table, + u32 total_sg_needed, unsigned char init_payload) +{ + u32 i = 0, j, page_offset = 0, sg_per_table; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + struct page *pg; + struct scatterlist *sg; + unsigned char *p; + + while (total_sg_needed) { + unsigned int chain_entry = 0; + 
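+ /*
+ * Each pass fills at most max_sg_per_table entries, i.e.
+ * RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist); any
+ * remainder spills into the next table, reached through the
+ * chain entry reserved below.
+ */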
+ sg_per_table = (total_sg_needed > max_sg_per_table) ? + max_sg_per_table : total_sg_needed; + + /* + * Reserve extra element for chain entry + */ + if (sg_per_table < total_sg_needed) + chain_entry = 1; + + sg = kmalloc_array(sg_per_table + chain_entry, sizeof(*sg), + GFP_KERNEL); + if (!sg) + return -ENOMEM; + + sg_init_table(sg, sg_per_table + chain_entry); + + if (i > 0) { + sg_chain(sg_table[i - 1].sg_table, + max_sg_per_table + 1, sg); + } + + sg_table[i].sg_table = sg; + sg_table[i].rd_sg_count = sg_per_table; + sg_table[i].page_start_offset = page_offset; + sg_table[i++].page_end_offset = (page_offset + sg_per_table) + - 1; + + for (j = 0; j < sg_per_table; j++) { + pg = alloc_pages(GFP_KERNEL, 0); + if (!pg) { + pr_err("Unable to allocate scatterlist" + " pages for struct rd_dev_sg_table\n"); + return -ENOMEM; + } + sg_assign_page(&sg[j], pg); + sg[j].length = PAGE_SIZE; + + p = kmap(pg); + memset(p, init_payload, PAGE_SIZE); + kunmap(pg); + } + + page_offset += sg_per_table; + total_sg_needed -= sg_per_table; + } + + return 0; +} + +static int rd_build_device_space(struct rd_dev *rd_dev) +{ + struct rd_dev_sg_table *sg_table; + u32 sg_tables, total_sg_needed; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_page_count <= 0) { + pr_err("Illegal page count: %u for Ramdisk device\n", + rd_dev->rd_page_count); + return -EINVAL; + } + + /* Don't need backing pages for NULLIO */ + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + + total_sg_needed = rd_dev->rd_page_count; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL); + if (!sg_table) + return -ENOMEM; + + rd_dev->sg_table_array = sg_table; + rd_dev->sg_table_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00); + if (rc) + return rc; + + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count); + + return 0; +} + +static void rd_release_prot_space(struct rd_dev *rd_dev) +{ + u32 page_count; + + if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count) + return; + + page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, + rd_dev->sg_prot_count); + + pr_debug("CORE_RD[%u] - Released protection space for Ramdisk" + " Device ID: %u, pages %u in %u tables total bytes %lu\n", + rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, + rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); + + rd_dev->sg_prot_array = NULL; + rd_dev->sg_prot_count = 0; +} + +static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size) +{ + struct rd_dev_sg_table *sg_table; + u32 total_sg_needed, sg_tables; + u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + int rc; + + if (rd_dev->rd_flags & RDF_NULLIO) + return 0; + /* + * prot_length=8byte dif data + * tot sg needed = rd_page_count * (PGSZ/block_size) * + * (prot_length/block_size) + pad + * PGSZ canceled each other. 
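+ *
+ * Worked example: with 4 KiB pages, 512-byte blocks and 8-byte DIF
+ * tuples, each data page holds 8 blocks needing 8 * 8 = 64 bytes of
+ * PI, so one protection page covers 64 data pages -- matching the
+ * rd_page_count * prot_length / block_size term below.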
+ */ + total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1; + + sg_tables = (total_sg_needed / max_sg_per_table) + 1; + sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL); + if (!sg_table) + return -ENOMEM; + + rd_dev->sg_prot_array = sg_table; + rd_dev->sg_prot_count = sg_tables; + + rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff); + if (rc) + return rc; + + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of" + " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, + rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count); + + return 0; +} + +static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name) +{ + struct rd_dev *rd_dev; + struct rd_host *rd_host = hba->hba_ptr; + + rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL); + if (!rd_dev) + return NULL; + + rd_dev->rd_host = rd_host; + + return &rd_dev->dev; +} + +static int rd_configure_device(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + struct rd_host *rd_host = dev->se_hba->hba_ptr; + int ret; + + if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { + pr_debug("Missing rd_pages= parameter\n"); + return -EINVAL; + } + + ret = rd_build_device_space(rd_dev); + if (ret < 0) + goto fail; + + dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; + dev->dev_attrib.hw_max_sectors = UINT_MAX; + dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; + dev->dev_attrib.is_nonrot = 1; + + rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; + + pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of" + " %u pages in %u tables, %lu total bytes\n", + rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_dev->sg_table_count, + (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); + + return 0; + +fail: + rd_release_device_space(rd_dev); + return ret; +} + +static void rd_dev_call_rcu(struct rcu_head *p) +{ + struct se_device *dev = container_of(p, struct se_device, rcu_head); + struct rd_dev *rd_dev = RD_DEV(dev); + + kfree(rd_dev); +} + +static void rd_free_device(struct se_device *dev) +{ + call_rcu(&dev->rcu_head, rd_dev_call_rcu); +} + +static void rd_destroy_device(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + rd_release_device_space(rd_dev); +} + +static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) +{ + struct rd_dev_sg_table *sg_table; + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + + i = page / sg_per_table; + if (i < rd_dev->sg_table_count) { + sg_table = &rd_dev->sg_table_array[i]; + if ((sg_table->page_start_offset <= page) && + (sg_table->page_end_offset >= page)) + return sg_table; + } + + pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n", + page); + + return NULL; +} + +static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page) +{ + struct rd_dev_sg_table *sg_table; + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE / + sizeof(struct scatterlist)); + + i = page / sg_per_table; + if (i < rd_dev->sg_prot_count) { + sg_table = &rd_dev->sg_prot_array[i]; + if ((sg_table->page_start_offset <= page) && + (sg_table->page_end_offset >= page)) + return sg_table; + } + + pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n", + page); + + return NULL; +} + +static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) +{ + struct se_device *se_dev = cmd->se_dev; + struct rd_dev *dev = RD_DEV(se_dev); + struct rd_dev_sg_table *prot_table; + struct scatterlist *prot_sg; + u32 sectors = 
cmd->data_length / se_dev->dev_attrib.block_size; + u32 prot_offset, prot_page; + u32 prot_npages __maybe_unused; + u64 tmp; + sense_reason_t rc = 0; + + tmp = cmd->t_task_lba * se_dev->prot_length; + prot_offset = do_div(tmp, PAGE_SIZE); + prot_page = tmp; + + prot_table = rd_get_prot_table(dev, prot_page); + if (!prot_table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + prot_sg = &prot_table->sg_table[prot_page - + prot_table->page_start_offset]; + + if (se_dev->dev_attrib.pi_prot_verify) { + if (is_read) + rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + else + rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, + cmd->t_prot_sg, 0); + } + if (!rc) + sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); + + return rc; +} + +static sense_reason_t +rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) +{ + struct se_device *se_dev = cmd->se_dev; + struct rd_dev *dev = RD_DEV(se_dev); + struct rd_dev_sg_table *table; + struct scatterlist *rd_sg; + struct sg_mapping_iter m; + u32 rd_offset; + u32 rd_size; + u32 rd_page; + u32 src_len; + u64 tmp; + sense_reason_t rc; + + if (dev->rd_flags & RDF_NULLIO) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + + tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; + rd_offset = do_div(tmp, PAGE_SIZE); + rd_page = tmp; + rd_size = cmd->data_length; + + table = rd_get_sg_table(dev, rd_page); + if (!table) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + rd_sg = &table->sg_table[rd_page - table->page_start_offset]; + + pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", + dev->rd_dev_id, + data_direction == DMA_FROM_DEVICE ? "Read" : "Write", + cmd->t_task_lba, rd_size, rd_page, rd_offset); + + if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && + data_direction == DMA_TO_DEVICE) { + rc = rd_do_prot_rw(cmd, false); + if (rc) + return rc; + } + + src_len = PAGE_SIZE - rd_offset; + sg_miter_start(&m, sgl, sgl_nents, + data_direction == DMA_FROM_DEVICE ? 
+ SG_MITER_TO_SG : SG_MITER_FROM_SG); + while (rd_size) { + u32 len; + void *rd_addr; + + sg_miter_next(&m); + if (!(u32)m.length) { + pr_debug("RD[%u]: invalid sgl %p len %zu\n", + dev->rd_dev_id, m.addr, m.length); + sg_miter_stop(&m); + return TCM_INCORRECT_AMOUNT_OF_DATA; + } + len = min((u32)m.length, src_len); + if (len > rd_size) { + pr_debug("RD[%u]: size underrun page %d offset %d " + "size %d\n", dev->rd_dev_id, + rd_page, rd_offset, rd_size); + len = rd_size; + } + m.consumed = len; + + rd_addr = sg_virt(rd_sg) + rd_offset; + + if (data_direction == DMA_FROM_DEVICE) + memcpy(m.addr, rd_addr, len); + else + memcpy(rd_addr, m.addr, len); + + rd_size -= len; + if (!rd_size) + continue; + + src_len -= len; + if (src_len) { + rd_offset += len; + continue; + } + + /* rd page completed, next one please */ + rd_page++; + rd_offset = 0; + src_len = PAGE_SIZE; + if (rd_page <= table->page_end_offset) { + rd_sg++; + continue; + } + + table = rd_get_sg_table(dev, rd_page); + if (!table) { + sg_miter_stop(&m); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + /* since we increment, the first sg entry is correct */ + rd_sg = table->sg_table; + } + sg_miter_stop(&m); + + if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type && + data_direction == DMA_FROM_DEVICE) { + rc = rd_do_prot_rw(cmd, true); + if (rc) + return rc; + } + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +enum { + Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err +}; + +static match_table_t tokens = { + {Opt_rd_pages, "rd_pages=%d"}, + {Opt_rd_nullio, "rd_nullio=%d"}, + {Opt_rd_dummy, "rd_dummy=%d"}, + {Opt_err, NULL} +}; + +static ssize_t rd_set_configfs_dev_params(struct se_device *dev, + const char *page, ssize_t count) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + int arg, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_rd_pages: + match_int(args, &arg); + rd_dev->rd_page_count = arg; + pr_debug("RAMDISK: Referencing Page" + " Count: %u\n", rd_dev->rd_page_count); + rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; + break; + case Opt_rd_nullio: + match_int(args, &arg); + if (arg != 1) + break; + + pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg); + rd_dev->rd_flags |= RDF_NULLIO; + break; + case Opt_rd_dummy: + match_int(args, &arg); + if (arg != 1) + break; + + pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg); + rd_dev->rd_flags |= RDF_DUMMY; + break; + default: + break; + } + } + + kfree(orig); + return count; +} + +static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", + rd_dev->rd_dev_id); + bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" + " SG_table_count: %u nullio: %d dummy: %d\n", + rd_dev->rd_page_count, + PAGE_SIZE, rd_dev->sg_table_count, + !!(rd_dev->rd_flags & RDF_NULLIO), + !!(rd_dev->rd_flags & RDF_DUMMY)); + return bl; +} + +static u32 rd_get_device_type(struct se_device *dev) +{ + if (RD_DEV(dev)->rd_flags & RDF_DUMMY) + return 0x3f; /* Unknown device type, not connected */ + else + return sbc_get_device_type(dev); +} + +static sector_t rd_get_blocks(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / + 
dev->dev_attrib.block_size) - 1; + + return blocks_long; +} + +static int rd_init_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + if (!dev->dev_attrib.pi_prot_type) + return 0; + + return rd_build_prot_space(rd_dev, dev->prot_length, + dev->dev_attrib.block_size); +} + +static void rd_free_prot(struct se_device *dev) +{ + struct rd_dev *rd_dev = RD_DEV(dev); + + rd_release_prot_space(rd_dev); +} + +static struct exec_cmd_ops rd_exec_cmd_ops = { + .execute_rw = rd_execute_rw, +}; + +static sense_reason_t +rd_parse_cdb(struct se_cmd *cmd) +{ + return sbc_parse_cdb(cmd, &rd_exec_cmd_ops); +} + +static const struct target_backend_ops rd_mcp_ops = { + .name = "rd_mcp", + .inquiry_prod = "RAMDISK-MCP", + .inquiry_rev = RD_MCP_VERSION, + .attach_hba = rd_attach_hba, + .detach_hba = rd_detach_hba, + .alloc_device = rd_alloc_device, + .configure_device = rd_configure_device, + .destroy_device = rd_destroy_device, + .free_device = rd_free_device, + .parse_cdb = rd_parse_cdb, + .set_configfs_dev_params = rd_set_configfs_dev_params, + .show_configfs_dev_params = rd_show_configfs_dev_params, + .get_device_type = rd_get_device_type, + .get_blocks = rd_get_blocks, + .init_prot = rd_init_prot, + .free_prot = rd_free_prot, + .tb_dev_attrib_attrs = sbc_attrib_attrs, +}; + +int __init rd_module_init(void) +{ + return transport_backend_register(&rd_mcp_ops); +} + +void rd_module_exit(void) +{ + target_backend_unregister(&rd_mcp_ops); +} diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h new file mode 100644 index 0000000000..9ffda5c4b5 --- /dev/null +++ b/drivers/target/target_core_rd.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_RD_H +#define TARGET_CORE_RD_H + +#include <linux/module.h> +#include <linux/types.h> +#include <target/target_core_base.h> + +#define RD_HBA_VERSION "v4.0" +#define RD_MCP_VERSION "4.0" + +/* Largest piece of memory kmalloc can allocate */ +#define RD_MAX_ALLOCATION_SIZE 65536 +#define RD_DEVICE_QUEUE_DEPTH 32 +#define RD_MAX_DEVICE_QUEUE_DEPTH 128 +#define RD_BLOCKSIZE 512 + +/* Used in target_core_init_configfs() for virtual LUN 0 access */ +int __init rd_module_init(void); +void rd_module_exit(void); + +struct rd_dev_sg_table { + u32 page_start_offset; + u32 page_end_offset; + u32 rd_sg_count; + struct scatterlist *sg_table; +} ____cacheline_aligned; + +#define RDF_HAS_PAGE_COUNT 0x01 +#define RDF_NULLIO 0x02 +#define RDF_DUMMY 0x04 + +struct rd_dev { + struct se_device dev; + u32 rd_flags; + /* Unique Ramdisk Device ID in Ramdisk HBA */ + u32 rd_dev_id; + /* Total page count for ramdisk device */ + u32 rd_page_count; + /* Number of SG tables in sg_table_array */ + u32 sg_table_count; + /* Number of SG tables in sg_prot_array */ + u32 sg_prot_count; + /* Array of rd_dev_sg_table_t containing scatterlists */ + struct rd_dev_sg_table *sg_table_array; + /* Array of rd_dev_sg_table containing protection scatterlists */ + struct rd_dev_sg_table *sg_prot_array; + /* Ramdisk HBA device is connected to */ + struct rd_host *rd_host; +} ____cacheline_aligned; + +struct rd_host { + u32 rd_host_dev_id_count; + u32 rd_host_id; /* Unique Ramdisk Host ID */ +} ____cacheline_aligned; + +#endif /* TARGET_CORE_RD_H */ diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c new file mode 100644 index 0000000000..6a02561cc2 --- /dev/null +++ b/drivers/target/target_core_sbc.c @@ -0,0 +1,1397 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SCSI Block Commands (SBC) parsing and 
emulation. + * + * (c) Copyright 2002-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/ratelimit.h> +#include <linux/crc-t10dif.h> +#include <linux/t10-pi.h> +#include <asm/unaligned.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_tcq.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_ua.h" +#include "target_core_alua.h" + +static sense_reason_t +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); +static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); + +static sense_reason_t +sbc_emulate_readcapacity(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *cdb = cmd->t_task_cdb; + unsigned long long blocks_long = dev->transport->get_blocks(dev); + unsigned char *rbuf; + unsigned char buf[8]; + u32 blocks; + + /* + * SBC-2 says: + * If the PMI bit is set to zero and the LOGICAL BLOCK + * ADDRESS field is not set to zero, the device server shall + * terminate the command with CHECK CONDITION status with + * the sense key set to ILLEGAL REQUEST and the additional + * sense code set to INVALID FIELD IN CDB. + * + * In SBC-3, these fields are obsolete, but some SCSI + * compliance tests actually check this, so we might as well + * follow SBC-2. + */ + if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) + return TCM_INVALID_CDB_FIELD; + + if (blocks_long >= 0x00000000ffffffff) + blocks = 0xffffffff; + else + blocks = (u32)blocks_long; + + put_unaligned_be32(blocks, &buf[0]); + put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]); + + rbuf = transport_kmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8); + return 0; +} + +static sense_reason_t +sbc_emulate_readcapacity_16(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + int pi_prot_type = dev->dev_attrib.pi_prot_type; + + unsigned char *rbuf; + unsigned char buf[32]; + unsigned long long blocks = dev->transport->get_blocks(dev); + + memset(buf, 0, sizeof(buf)); + put_unaligned_be64(blocks, &buf[0]); + put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]); + /* + * Set P_TYPE and PROT_EN bits for DIF support + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + /* + * Only override a device's pi_prot_type if no T10-PI is + * available, and sess_prot_type has been explicitly enabled. + */ + if (!pi_prot_type) + pi_prot_type = sess->sess_prot_type; + + if (pi_prot_type) + buf[12] = (pi_prot_type - 1) << 1 | 0x1; + } + + if (dev->transport->get_lbppbe) + buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; + + if (dev->transport->get_alignment_offset_lbas) { + u16 lalba = dev->transport->get_alignment_offset_lbas(dev); + + put_unaligned_be16(lalba, &buf[14]); + } + + /* + * Set Thin Provisioning Enable bit following sbc3r22 in section + * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 
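+ * LBPME is bit 7 of byte 14, hence the 0x80 below; the LBPRZ bit set
+ * further down is bit 6, hence the 0x40.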
+ */ + if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) { + buf[14] |= 0x80; + + /* + * LBPRZ signifies that zeroes will be read back from an LBA after + * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2) + */ + if (dev->dev_attrib.unmap_zeroes_data) + buf[14] |= 0x40; + } + + rbuf = transport_kmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32); + return 0; +} + +static sense_reason_t +sbc_emulate_startstop(struct se_cmd *cmd) +{ + unsigned char *cdb = cmd->t_task_cdb; + + /* + * See sbc3r36 section 5.25 + * Immediate bit should be set since there is nothing to complete + * POWER CONDITION MODIFIER 0h + */ + if (!(cdb[1] & 1) || cdb[2] || cdb[3]) + return TCM_INVALID_CDB_FIELD; + + /* + * See sbc3r36 section 5.25 + * POWER CONDITION 0h START_VALID - process START and LOEJ + */ + if (cdb[4] >> 4 & 0xf) + return TCM_INVALID_CDB_FIELD; + + /* + * See sbc3r36 section 5.25 + * LOEJ 0h - nothing to load or unload + * START 1h - we are ready + */ + if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4)) + return TCM_INVALID_CDB_FIELD; + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) +{ + u32 num_blocks; + + if (cmd->t_task_cdb[0] == WRITE_SAME) + num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); + else if (cmd->t_task_cdb[0] == WRITE_SAME_16) + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); + else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); + + /* + * Use the explicit range when non zero is supplied, otherwise calculate + * the remaining range based on ->get_blocks() - starting LBA. + */ + if (num_blocks) + return num_blocks; + + return cmd->se_dev->transport->get_blocks(cmd->se_dev) - + cmd->t_task_lba + 1; +} +EXPORT_SYMBOL(sbc_get_write_same_sectors); + +static sense_reason_t +sbc_execute_write_same_unmap(struct se_cmd *cmd) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + sector_t nolb = sbc_get_write_same_sectors(cmd); + sense_reason_t ret; + + if (nolb) { + ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb); + if (ret) + return ret; + } + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static sense_reason_t +sbc_emulate_noop(struct se_cmd *cmd) +{ + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) +{ + return cmd->se_dev->dev_attrib.block_size * sectors; +} + +static inline u32 transport_get_sectors_6(unsigned char *cdb) +{ + /* + * Use 8-bit sector value. SBC-3 says: + * + * A TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be written. Any other value + * specifies the number of logical blocks that shall be + * written. + */ + return cdb[4] ? 
: 256; +} + +static inline u32 transport_get_sectors_10(unsigned char *cdb) +{ + return get_unaligned_be16(&cdb[7]); +} + +static inline u32 transport_get_sectors_12(unsigned char *cdb) +{ + return get_unaligned_be32(&cdb[6]); +} + +static inline u32 transport_get_sectors_16(unsigned char *cdb) +{ + return get_unaligned_be32(&cdb[10]); +} + +/* + * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants + */ +static inline u32 transport_get_sectors_32(unsigned char *cdb) +{ + return get_unaligned_be32(&cdb[28]); + +} + +static inline u32 transport_lba_21(unsigned char *cdb) +{ + return get_unaligned_be24(&cdb[1]) & 0x1fffff; +} + +static inline u32 transport_lba_32(unsigned char *cdb) +{ + return get_unaligned_be32(&cdb[2]); +} + +static inline unsigned long long transport_lba_64(unsigned char *cdb) +{ + return get_unaligned_be64(&cdb[2]); +} + +static sense_reason_t +sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, + struct exec_cmd_ops *ops) +{ + struct se_device *dev = cmd->se_dev; + sector_t end_lba = dev->transport->get_blocks(dev) + 1; + unsigned int sectors = sbc_get_write_same_sectors(cmd); + sense_reason_t ret; + + if ((flags & 0x04) || (flags & 0x02)) { + pr_err("WRITE_SAME PBDATA and LBDATA" + " bits not supported for Block Discard" + " Emulation\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { + pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", + sectors, cmd->se_dev->dev_attrib.max_write_same_len); + return TCM_INVALID_CDB_FIELD; + } + /* + * Sanity check for LBA wrap and request past end of device. + */ + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || + ((cmd->t_task_lba + sectors) > end_lba)) { + pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", + (unsigned long long)end_lba, cmd->t_task_lba, sectors); + return TCM_ADDRESS_OUT_OF_RANGE; + } + + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ + if (flags & 0x10) { + pr_warn("WRITE SAME with ANCHOR not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + if (flags & 0x01) { + pr_warn("WRITE SAME with NDOB not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + /* + * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting + * translated into block discard requests within backend code. 
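+ *
+ * E.g. a WRITE_SAME(16) with the UNMAP bit set -- byte 1, bit 3, the
+ * 0x08 tested below -- discards the LBA range rather than writing out
+ * the single-block payload.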
+ */ + if (flags & 0x08) { + if (!ops->execute_unmap) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + if (!dev->dev_attrib.emulate_tpws) { + pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device" + " has emulate_tpws disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + cmd->execute_cmd = sbc_execute_write_same_unmap; + return 0; + } + if (!ops->execute_write_same) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); + if (ret) + return ret; + + cmd->execute_cmd = ops->execute_write_same; + return 0; +} + +static sense_reason_t +sbc_execute_rw(struct se_cmd *cmd) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + + return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, + cmd->data_direction); +} + +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + int *post_ret) +{ + struct se_device *dev = cmd->se_dev; + sense_reason_t ret = TCM_NO_SENSE; + + spin_lock_irq(&cmd->t_state_lock); + if (success) { + *post_ret = 1; + + if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + spin_unlock_irq(&cmd->t_state_lock); + + /* + * Unlock ->caw_sem originally obtained during sbc_compare_and_write() + * before the original READ I/O submission. + */ + up(&dev->caw_sem); + + return ret; +} + +/* + * compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill + * @miscmp_off and return TCM_MISCOMPARE_VERIFY. + */ +static sense_reason_t +compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents, + struct scatterlist *cmp_sgl, unsigned int cmp_nents, + unsigned int cmp_len, unsigned int *miscmp_off) +{ + unsigned char *buf = NULL; + struct scatterlist *sg; + sense_reason_t ret; + unsigned int offset; + size_t rc; + int sg_cnt; + + buf = kzalloc(cmp_len, GFP_KERNEL); + if (!buf) { + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + + rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len); + if (!rc) { + pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + /* + * Compare SCSI READ payload against verify payload + */ + offset = 0; + ret = TCM_NO_SENSE; + for_each_sg(read_sgl, sg, read_nents, sg_cnt) { + unsigned int len = min(sg->length, cmp_len); + unsigned char *addr = kmap_atomic(sg_page(sg)); + + if (memcmp(addr, buf + offset, len)) { + unsigned int i; + + for (i = 0; i < len && addr[i] == buf[offset + i]; i++) + ; + *miscmp_off = offset + i; + pr_warn("Detected MISCOMPARE at offset %u\n", + *miscmp_off); + ret = TCM_MISCOMPARE_VERIFY; + } + kunmap_atomic(addr); + if (ret != TCM_NO_SENSE) + goto out; + + offset += len; + cmp_len -= len; + if (!cmp_len) + break; + } + pr_debug("COMPARE AND WRITE read data matches compare data\n"); +out: + kfree(buf); + return ret; +} + +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, + int *post_ret) +{ + struct se_device *dev = cmd->se_dev; + struct sg_table write_tbl = { }; + struct scatterlist *write_sg; + struct sg_mapping_iter m; + unsigned int len; + unsigned int block_size = dev->dev_attrib.block_size; + unsigned int compare_len = (cmd->t_task_nolb * block_size); + unsigned int miscmp_off = 0; + sense_reason_t ret = TCM_NO_SENSE; + int i; + + if (!success) { + /* + * Handle early failure in transport_generic_request_failure(), + * which will not have taken ->caw_sem yet.. 
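+ * If the data buffers were never set up, the semaphore was never
+ * taken either, so this path must return without releasing it; the
+ * aborted/stopped case below did take it and falls through to out:
+ * to release it.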
+ */ + if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) + return TCM_NO_SENSE; + + /* + * The command has been stopped or aborted so + * we don't have to perform the write operation. + */ + WARN_ON(!(cmd->transport_state & + (CMD_T_ABORTED | CMD_T_STOP))); + goto out; + } + /* + * Handle special case for zero-length COMPARE_AND_WRITE + */ + if (!cmd->data_length) + goto out; + /* + * Immediately exit + release dev->caw_sem if command has already + * been failed with a non-zero SCSI status. + */ + if (cmd->scsi_status) { + pr_debug("compare_and_write_callback: non zero scsi_status:" + " 0x%02x\n", cmd->scsi_status); + *post_ret = 1; + if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + + ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents, + cmd->t_data_sg, + cmd->t_data_nents, + compare_len, + &miscmp_off); + if (ret == TCM_MISCOMPARE_VERIFY) { + /* + * SBC-4 r15: 5.3 COMPARE AND WRITE command + * In the sense data (see 4.18 and SPC-5) the offset from the + * start of the Data-Out Buffer to the first byte of data that + * was not equal shall be reported in the INFORMATION field. + */ + cmd->sense_info = miscmp_off; + goto out; + } else if (ret) + goto out; + + if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) { + pr_err("Unable to allocate compare_and_write sg\n"); + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + write_sg = write_tbl.sgl; + + i = 0; + len = compare_len; + sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); + /* + * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. + */ + while (len) { + sg_miter_next(&m); + + if (block_size < PAGE_SIZE) { + sg_set_page(&write_sg[i], m.page, block_size, + m.piter.sg->offset + block_size); + } else { + sg_miter_next(&m); + sg_set_page(&write_sg[i], m.page, block_size, + m.piter.sg->offset); + } + len -= block_size; + i++; + } + sg_miter_stop(&m); + /* + * Save the original SGL + nents values before updating to new + * assignments, to be released in transport_free_pages() -> + * transport_reset_sgl_orig() + */ + cmd->t_data_sg_orig = cmd->t_data_sg; + cmd->t_data_sg = write_sg; + cmd->t_data_nents_orig = cmd->t_data_nents; + cmd->t_data_nents = 1; + + cmd->sam_task_attr = TCM_HEAD_TAG; + cmd->transport_complete_callback = compare_and_write_post; + /* + * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler + * for submitting the adjusted SGL to write instance user-data. + */ + cmd->execute_cmd = sbc_execute_rw; + + spin_lock_irq(&cmd->t_state_lock); + cmd->t_state = TRANSPORT_PROCESSING; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + + __target_execute_cmd(cmd, false); + + return ret; + +out: + /* + * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in + * sbc_compare_and_write() before the original READ I/O submission. + */ + up(&dev->caw_sem); + sg_free_table(&write_tbl); + return ret; +} + +static sense_reason_t +sbc_compare_and_write(struct se_cmd *cmd) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + struct se_device *dev = cmd->se_dev; + sense_reason_t ret; + int rc; + /* + * Submit the READ first for COMPARE_AND_WRITE to perform the + * comparision using SGLs at cmd->t_bidi_data_sg.. 
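+ * The WRITE half is only issued later, from
+ * compare_and_write_callback(), once the READ data has compared equal.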
+ */ + rc = down_interruptible(&dev->caw_sem); + if (rc != 0) { + cmd->transport_complete_callback = NULL; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * Reset cmd->data_length to individual block_size in order to not + * confuse backend drivers that depend on this value matching the + * size of the I/O being submitted. + */ + cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; + + ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, + DMA_FROM_DEVICE); + if (ret) { + cmd->transport_complete_callback = NULL; + up(&dev->caw_sem); + return ret; + } + /* + * Unlock of dev->caw_sem to occur in compare_and_write_callback() + * upon MISCOMPARE, or in compare_and_write_done() upon completion + * of WRITE instance user-data. + */ + return TCM_NO_SENSE; +} + +static int +sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type, + bool is_write, struct se_cmd *cmd) +{ + if (is_write) { + cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP : + protect ? TARGET_PROT_DOUT_PASS : + TARGET_PROT_DOUT_INSERT; + switch (protect) { + case 0x0: + case 0x3: + cmd->prot_checks = 0; + break; + case 0x1: + case 0x5: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; + break; + case 0x2: + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; + break; + case 0x4: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + break; + default: + pr_err("Unsupported protect field %d\n", protect); + return -EINVAL; + } + } else { + cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT : + protect ? TARGET_PROT_DIN_PASS : + TARGET_PROT_DIN_STRIP; + switch (protect) { + case 0x0: + case 0x1: + case 0x5: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; + break; + case 0x2: + if (prot_type == TARGET_DIF_TYPE1_PROT) + cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; + break; + case 0x3: + cmd->prot_checks = 0; + break; + case 0x4: + cmd->prot_checks = TARGET_DIF_CHECK_GUARD; + break; + default: + pr_err("Unsupported protect field %d\n", protect); + return -EINVAL; + } + } + + return 0; +} + +static sense_reason_t +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, + u32 sectors, bool is_write) +{ + int sp_ops = cmd->se_sess->sup_prot_ops; + int pi_prot_type = dev->dev_attrib.pi_prot_type; + bool fabric_prot = false; + + if (!cmd->t_prot_sg || !cmd->t_prot_nents) { + if (unlikely(protect && + !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) { + pr_err("CDB contains protect bit, but device + fabric does" + " not advertise PROTECT=1 feature bit\n"); + return TCM_INVALID_CDB_FIELD; + } + if (cmd->prot_pto) + return TCM_NO_SENSE; + } + + switch (dev->dev_attrib.pi_prot_type) { + case TARGET_DIF_TYPE3_PROT: + cmd->reftag_seed = 0xffffffff; + break; + case TARGET_DIF_TYPE2_PROT: + if (protect) + return TCM_INVALID_CDB_FIELD; + + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE1_PROT: + cmd->reftag_seed = cmd->t_task_lba; + break; + case TARGET_DIF_TYPE0_PROT: + /* + * See if the fabric supports T10-PI, and the session has been + * configured to allow export PROTECT=1 feature bit with backend + * devices that don't support T10-PI. + */ + fabric_prot = is_write ? 
+ !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) : + !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT)); + + if (fabric_prot && cmd->se_sess->sess_prot_type) { + pi_prot_type = cmd->se_sess->sess_prot_type; + break; + } + if (!protect) + return TCM_NO_SENSE; + fallthrough; + default: + pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " + "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); + return TCM_INVALID_CDB_FIELD; + } + + if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd)) + return TCM_INVALID_CDB_FIELD; + + cmd->prot_type = pi_prot_type; + cmd->prot_length = dev->prot_length * sectors; + + /** + * In case protection information exists over the wire + * we modify command data length to describe pure data. + * The actual transfer length is data length + protection + * length + **/ + if (protect) + cmd->data_length = sectors * dev->dev_attrib.block_size; + + pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " + "prot_op=%d prot_checks=%d\n", + __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, + cmd->prot_op, cmd->prot_checks); + + return TCM_NO_SENSE; +} + +static int +sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) +{ + if (cdb[1] & 0x10) { + /* see explanation in spc_emulate_modesense */ + if (!target_check_fua(dev)) { + pr_err("Got CDB: 0x%02x with DPO bit set, but device" + " does not advertise support for DPO\n", cdb[0]); + return -EINVAL; + } + } + if (cdb[1] & 0x8) { + if (!target_check_fua(dev)) { + pr_err("Got CDB: 0x%02x with FUA bit set, but device" + " does not advertise support for FUA write\n", + cdb[0]); + return -EINVAL; + } + cmd->se_cmd_flags |= SCF_FUA; + } + return 0; +} + +sense_reason_t +sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *cdb = cmd->t_task_cdb; + unsigned int size; + u32 sectors = 0; + sense_reason_t ret; + + cmd->protocol_data = ops; + + switch (cdb[0]) { + case READ_6: + sectors = transport_get_sectors_6(cdb); + cmd->t_task_lba = transport_lba_21(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case READ_10: + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case READ_12: + sectors = transport_get_sectors_12(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case READ_16: + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case WRITE_6: + sectors = transport_get_sectors_6(cdb); + cmd->t_task_lba = transport_lba_21(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case WRITE_10: + case WRITE_VERIFY: + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = 
transport_lba_32(cdb); + + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case WRITE_12: + sectors = transport_get_sectors_12(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case WRITE_16: + case WRITE_VERIFY_16: + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + cmd->execute_cmd = sbc_execute_rw; + break; + case VARIABLE_LENGTH_CMD: + { + u16 service_action = get_unaligned_be16(&cdb[8]); + switch (service_action) { + case WRITE_SAME_32: + sectors = transport_get_sectors_32(cdb); + if (!sectors) { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" + " supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + size = sbc_get_size(cmd, 1); + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); + + ret = sbc_setup_write_same(cmd, cdb[10], ops); + if (ret) + return ret; + break; + default: + pr_err("VARIABLE_LENGTH_CMD service action" + " 0x%04x not supported\n", service_action); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + break; + } + case COMPARE_AND_WRITE: + if (!dev->dev_attrib.emulate_caw) { + pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n", + dev->se_hba->backend->ops->name, + config_item_name(&dev->dev_group.cg_item), + dev->t10_wwn.unit_serial); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + sectors = cdb[13]; + /* + * Currently enforce COMPARE_AND_WRITE for a single sector + */ + if (sectors > 1) { + pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" + " than 1\n", sectors); + return TCM_INVALID_CDB_FIELD; + } + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + + /* + * Double size because we have two buffers, note that + * zero is not an error.. 
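+ * The Data-Out buffer carries the verify payload followed by the
+ * write payload, so the expected length is 2 * NoLB * block_size.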
+ */ + size = 2 * sbc_get_size(cmd, sectors); + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); + cmd->t_task_nolb = sectors; + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; + cmd->execute_cmd = sbc_compare_and_write; + cmd->transport_complete_callback = compare_and_write_callback; + break; + case READ_CAPACITY: + size = READ_CAP_LEN; + cmd->execute_cmd = sbc_emulate_readcapacity; + break; + case SERVICE_ACTION_IN_16: + switch (cmd->t_task_cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + cmd->execute_cmd = sbc_emulate_readcapacity_16; + break; + case SAI_REPORT_REFERRALS: + cmd->execute_cmd = target_emulate_report_referrals; + break; + default: + pr_err("Unsupported SA: 0x%02x\n", + cmd->t_task_cdb[1] & 0x1f); + return TCM_INVALID_CDB_FIELD; + } + size = get_unaligned_be32(&cdb[10]); + break; + case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: + if (cdb[0] == SYNCHRONIZE_CACHE) { + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + } + if (ops->execute_sync_cache) { + cmd->execute_cmd = ops->execute_sync_cache; + goto check_lba; + } + size = 0; + cmd->execute_cmd = sbc_emulate_noop; + break; + case UNMAP: + if (!ops->execute_unmap) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + if (!dev->dev_attrib.emulate_tpu) { + pr_err("Got UNMAP, but backend device has" + " emulate_tpu disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + size = get_unaligned_be16(&cdb[7]); + cmd->execute_cmd = sbc_execute_unmap; + break; + case WRITE_SAME_16: + sectors = transport_get_sectors_16(cdb); + if (!sectors) { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + size = sbc_get_size(cmd, 1); + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); + + ret = sbc_setup_write_same(cmd, cdb[1], ops); + if (ret) + return ret; + break; + case WRITE_SAME: + sectors = transport_get_sectors_10(cdb); + if (!sectors) { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + size = sbc_get_size(cmd, 1); + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); + + /* + * Follow sbcr26 with WRITE_SAME (10) and check for the existence + * of byte 1 bit 3 UNMAP instead of original reserved field + */ + ret = sbc_setup_write_same(cmd, cdb[1], ops); + if (ret) + return ret; + break; + case VERIFY: + case VERIFY_16: + size = 0; + if (cdb[0] == VERIFY) { + sectors = transport_get_sectors_10(cdb); + cmd->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb); + cmd->t_task_lba = transport_lba_64(cdb); + } + cmd->execute_cmd = sbc_emulate_noop; + goto check_lba; + case REZERO_UNIT: + case SEEK_6: + case SEEK_10: + /* + * There are still clients out there which use these old SCSI-2 + * commands. This mainly happens when running VMs with legacy + * guest systems, connected via SCSI command pass-through to + * iSCSI targets. Make them happy and return status GOOD. 
+ */ + size = 0; + cmd->execute_cmd = sbc_emulate_noop; + break; + case START_STOP: + size = 0; + cmd->execute_cmd = sbc_emulate_startstop; + break; + default: + ret = spc_parse_cdb(cmd, &size); + if (ret) + return ret; + } + + /* reject any command that we don't have a handler for */ + if (!cmd->execute_cmd) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { + unsigned long long end_lba; +check_lba: + end_lba = dev->transport->get_blocks(dev) + 1; + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || + ((cmd->t_task_lba + sectors) > end_lba)) { + pr_err("cmd exceeds last lba %llu " + "(lba %llu, sectors %u)\n", + end_lba, cmd->t_task_lba, sectors); + return TCM_ADDRESS_OUT_OF_RANGE; + } + + if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) + size = sbc_get_size(cmd, sectors); + } + + return target_cmd_size_check(cmd, size); +} +EXPORT_SYMBOL(sbc_parse_cdb); + +u32 sbc_get_device_type(struct se_device *dev) +{ + return TYPE_DISK; +} +EXPORT_SYMBOL(sbc_get_device_type); + +static sense_reason_t +sbc_execute_unmap(struct se_cmd *cmd) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + struct se_device *dev = cmd->se_dev; + unsigned char *buf, *ptr = NULL; + sector_t lba; + int size; + u32 range; + sense_reason_t ret = 0; + int dl, bd_dl; + + /* We never set ANC_SUP */ + if (cmd->t_task_cdb[1]) + return TCM_INVALID_CDB_FIELD; + + if (cmd->data_length == 0) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + + if (cmd->data_length < 8) { + pr_warn("UNMAP parameter list length %u too small\n", + cmd->data_length); + return TCM_PARAMETER_LIST_LENGTH_ERROR; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + dl = get_unaligned_be16(&buf[0]); + bd_dl = get_unaligned_be16(&buf[2]); + + size = cmd->data_length - 8; + if (bd_dl > size) + pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", + cmd->data_length, bd_dl); + else + size = bd_dl; + + if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { + ret = TCM_INVALID_PARAMETER_LIST; + goto err; + } + + /* First UNMAP block descriptor starts at 8 byte offset */ + ptr = &buf[8]; + pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" + " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); + + while (size >= 16) { + lba = get_unaligned_be64(&ptr[0]); + range = get_unaligned_be32(&ptr[8]); + pr_debug("UNMAP: Using lba: %llu and range: %u\n", + (unsigned long long)lba, range); + + if (range > dev->dev_attrib.max_unmap_lba_count) { + ret = TCM_INVALID_PARAMETER_LIST; + goto err; + } + + if (lba + range > dev->transport->get_blocks(dev) + 1) { + ret = TCM_ADDRESS_OUT_OF_RANGE; + goto err; + } + + if (range) { + ret = ops->execute_unmap(cmd, lba, range); + if (ret) + goto err; + } + + ptr += 16; + size -= 16; + } + +err: + transport_kunmap_data_sg(cmd); + if (!ret) + target_complete_cmd(cmd, SAM_STAT_GOOD); + return ret; +} + +void +sbc_dif_generate(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + struct t10_pi_tuple *sdt; + struct scatterlist *dsg = cmd->t_data_sg, *psg; + sector_t sector = cmd->t_task_lba; + void *daddr, *paddr; + int i, j, offset = 0; + unsigned int block_size = dev->dev_attrib.block_size; + + for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + + for (j = 0; j < psg->length; + j += sizeof(*sdt)) { + __u16 crc; + unsigned int avail; + + if (offset >= dsg->length) { + offset -= 
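/*
 * Editor's note -- illustrative sketch, not part of the patch: the
 * UNMAP parameter list walked by sbc_execute_unmap() above is an
 * 8-byte header followed by 16-byte block descriptors.  A
 * self-contained sketch building a single-descriptor payload the way
 * an initiator would:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void put_be16(uint8_t *p, uint16_t v)
 *	{ p[0] = v >> 8; p[1] = v; }
 *
 *	static void put_be32(uint8_t *p, uint32_t v)
 *	{ p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v; }
 *
 *	static void put_be64(uint8_t *p, uint64_t v)
 *	{ put_be32(p, v >> 32); put_be32(p + 4, v); }
 *
 *	static size_t sketch_build_unmap_one(uint8_t buf[24], uint64_t lba,
 *					     uint32_t nblocks)
 *	{
 *		memset(buf, 0, 24);
 *		put_be16(&buf[0], 22);   UNMAP DATA LENGTH: bytes that follow
 *		put_be16(&buf[2], 16);   BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *		put_be64(&buf[8], lba);  descriptor: UNMAP LOGICAL BLOCK ADDRESS
 *		put_be32(&buf[16], nblocks);   NUMBER OF LOGICAL BLOCKS
 *		return 24;
 *	}
 *
 * The dl/bd_dl values read from bytes 0 and 2 above are exactly these
 * first two fields; header bytes 4..7 and the last four bytes of each
 * descriptor are reserved.
 */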
dsg->length; + kunmap_atomic(daddr - dsg->offset); + dsg = sg_next(dsg); + if (!dsg) { + kunmap_atomic(paddr - psg->offset); + return; + } + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + } + + sdt = paddr + j; + avail = min(block_size, dsg->length - offset); + crc = crc_t10dif(daddr + offset, avail); + if (avail < block_size) { + kunmap_atomic(daddr - dsg->offset); + dsg = sg_next(dsg); + if (!dsg) { + kunmap_atomic(paddr - psg->offset); + return; + } + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + offset = block_size - avail; + crc = crc_t10dif_update(crc, daddr, offset); + } else { + offset += block_size; + } + + sdt->guard_tag = cpu_to_be16(crc); + if (cmd->prot_type == TARGET_DIF_TYPE1_PROT) + sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); + sdt->app_tag = 0; + + pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (cmd->data_direction == DMA_TO_DEVICE) ? + "WRITE" : "READ", (unsigned long long)sector, + sdt->guard_tag, sdt->app_tag, + be32_to_cpu(sdt->ref_tag)); + + sector++; + } + + kunmap_atomic(daddr - dsg->offset); + kunmap_atomic(paddr - psg->offset); + } +} + +static sense_reason_t +sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt, + __u16 crc, sector_t sector, unsigned int ei_lba) +{ + __be16 csum; + + if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) + goto check_ref; + + csum = cpu_to_be16(crc); + + if (sdt->guard_tag != csum) { + pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" + " csum 0x%04x\n", (unsigned long long)sector, + be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); + return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + } + +check_ref: + if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)) + return 0; + + if (cmd->prot_type == TARGET_DIF_TYPE1_PROT && + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { + pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" + " sector MSB: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + if (cmd->prot_type == TARGET_DIF_TYPE2_PROT && + be32_to_cpu(sdt->ref_tag) != ei_lba) { + pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" + " ei_lba: 0x%08x\n", (unsigned long long)sector, + be32_to_cpu(sdt->ref_tag), ei_lba); + return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + } + + return 0; +} + +void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, + struct scatterlist *sg, int sg_off) +{ + struct se_device *dev = cmd->se_dev; + struct scatterlist *psg; + void *paddr, *addr; + unsigned int i, len, left; + unsigned int offset = sg_off; + + if (!sg) + return; + + left = sectors * dev->prot_length; + + for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { + unsigned int psg_len, copied = 0; + + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + psg_len = min(left, psg->length); + while (psg_len) { + len = min(psg_len, sg->length - offset); + addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; + + if (read) + memcpy(paddr + copied, addr, len); + else + memcpy(addr, paddr + copied, len); + + left -= len; + offset += len; + copied += len; + psg_len -= len; + + kunmap_atomic(addr - sg->offset - offset); + + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + } + kunmap_atomic(paddr - psg->offset); + } +} +EXPORT_SYMBOL(sbc_dif_copy_prot); + +sense_reason_t +sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, + unsigned int ei_lba, struct scatterlist *psg, int psg_off) +{ + struct 
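/*
 * Editor's note -- illustrative sketch, not part of the patch: each
 * protection interval handled by sbc_dif_generate()/sbc_dif_verify()
 * is an 8-byte T10 PI tuple, and the guard tag is a CRC16 with
 * polynomial 0x8bb7 over one logical block of data.  A self-contained
 * userspace sketch; sketch_crc_t10dif() is a bitwise reimplementation
 * of the kernel's crc_t10dif() (which is table/PCLMUL driven but
 * computes the same function):
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	struct sketch_pi_tuple {     all fields big-endian on the wire
 *		uint16_t guard_tag;  CRC16 of the data block
 *		uint16_t app_tag;    0xffff (T10_PI_APP_ESCAPE) disables checks
 *		uint32_t ref_tag;    low 32 bits of the LBA for Type 1
 *	};
 *
 *	static uint16_t sketch_crc_t10dif(const uint8_t *p, size_t len)
 *	{
 *		uint16_t crc = 0;
 *		size_t i;
 *		int b;
 *
 *		for (i = 0; i < len; i++) {
 *			crc ^= (uint16_t)p[i] << 8;
 *			for (b = 0; b < 8; b++)
 *				crc = (crc & 0x8000) ?
 *				      (uint16_t)(crc << 1) ^ 0x8bb7 :
 *				      (uint16_t)(crc << 1);
 *		}
 *		return crc;    check value over "123456789" is 0xd0db
 *	}
 */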
se_device *dev = cmd->se_dev; + struct t10_pi_tuple *sdt; + struct scatterlist *dsg = cmd->t_data_sg; + sector_t sector = start; + void *daddr, *paddr; + int i; + sense_reason_t rc; + int dsg_off = 0; + unsigned int block_size = dev->dev_attrib.block_size; + + for (; psg && sector < start + sectors; psg = sg_next(psg)) { + paddr = kmap_atomic(sg_page(psg)) + psg->offset; + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + + for (i = psg_off; i < psg->length && + sector < start + sectors; + i += sizeof(*sdt)) { + __u16 crc; + unsigned int avail; + + if (dsg_off >= dsg->length) { + dsg_off -= dsg->length; + kunmap_atomic(daddr - dsg->offset); + dsg = sg_next(dsg); + if (!dsg) { + kunmap_atomic(paddr - psg->offset); + return 0; + } + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + } + + sdt = paddr + i; + + pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" + " app_tag: 0x%04x ref_tag: %u\n", + (unsigned long long)sector, sdt->guard_tag, + sdt->app_tag, be32_to_cpu(sdt->ref_tag)); + + if (sdt->app_tag == T10_PI_APP_ESCAPE) { + dsg_off += block_size; + goto next; + } + + avail = min(block_size, dsg->length - dsg_off); + crc = crc_t10dif(daddr + dsg_off, avail); + if (avail < block_size) { + kunmap_atomic(daddr - dsg->offset); + dsg = sg_next(dsg); + if (!dsg) { + kunmap_atomic(paddr - psg->offset); + return 0; + } + daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; + dsg_off = block_size - avail; + crc = crc_t10dif_update(crc, daddr, dsg_off); + } else { + dsg_off += block_size; + } + + rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba); + if (rc) { + kunmap_atomic(daddr - dsg->offset); + kunmap_atomic(paddr - psg->offset); + cmd->sense_info = sector; + return rc; + } +next: + sector++; + ei_lba++; + } + + psg_off = 0; + kunmap_atomic(daddr - dsg->offset); + kunmap_atomic(paddr - psg->offset); + } + + return 0; +} +EXPORT_SYMBOL(sbc_dif_verify); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c new file mode 100644 index 0000000000..50290abc07 --- /dev/null +++ b/drivers/target/target_core_spc.c @@ -0,0 +1,2434 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SCSI Primary Commands (SPC) parsing and emulation. + * + * (c) Copyright 2002-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/unaligned.h> + +#include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> +#include <scsi/scsi_tcq.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" +#include "target_core_ua.h" +#include "target_core_xcopy.h" + +static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp; + + /* + * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. + */ + buf[5] = 0x80; + + /* + * Set TPGS field for explicit and/or implicit ALUA access type + * and opteration. 
+ * + * See spc4r17 section 6.4.2 Table 135 + */ + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); + if (tg_pt_gp) + buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; + rcu_read_unlock(); +} + +static u16 +spc_find_scsi_transport_vd(int proto_id) +{ + switch (proto_id) { + case SCSI_PROTOCOL_FCP: + return SCSI_VERSION_DESCRIPTOR_FCP4; + case SCSI_PROTOCOL_ISCSI: + return SCSI_VERSION_DESCRIPTOR_ISCSI; + case SCSI_PROTOCOL_SAS: + return SCSI_VERSION_DESCRIPTOR_SAS3; + case SCSI_PROTOCOL_SBP: + return SCSI_VERSION_DESCRIPTOR_SBP3; + case SCSI_PROTOCOL_SRP: + return SCSI_VERSION_DESCRIPTOR_SRP; + default: + pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI" + " transport PROTOCOL IDENTIFIER %#x\n", proto_id); + return 0; + } +} + +sense_reason_t +spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_lun *lun = cmd->se_lun; + struct se_portal_group *tpg = lun->lun_tpg; + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + + /* Set RMB (removable media) for tape devices */ + if (dev->transport->get_device_type(dev) == TYPE_TAPE) + buf[1] = 0x80; + + buf[2] = 0x06; /* SPC-4 */ + + /* + * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2 + * + * SPC4 says: + * A RESPONSE DATA FORMAT field set to 2h indicates that the + * standard INQUIRY data is in the format defined in this + * standard. Response data format values less than 2h are + * obsolete. Response data format values greater than 2h are + * reserved. + */ + buf[3] = 2; + + /* + * Enable SCCS and TPGS fields for Emulated ALUA + */ + spc_fill_alua_data(lun, buf); + + /* + * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY + */ + if (dev->dev_attrib.emulate_3pc) + buf[5] |= 0x8; + /* + * Set Protection (PROTECT) bit when DIF has been enabled on the + * device, and the fabric supports VERIFY + PASS. Also report + * PROTECT=1 if sess_prot_type has been configured to allow T10-PI + * to unprotected devices. + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type) + buf[5] |= 0x1; + } + + /* + * Set MULTIP bit to indicate presence of multiple SCSI target ports + */ + if (dev->export_count > 1) + buf[6] |= 0x10; + + buf[7] = 0x2; /* CmdQue=1 */ + + /* + * ASCII data fields described as being left-aligned shall have any + * unused bytes at the end of the field (i.e., highest offset) and the + * unused bytes shall be filled with ASCII space characters (20h). 
+ */ + memset(&buf[8], 0x20, + INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN); + memcpy(&buf[8], dev->t10_wwn.vendor, + strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN)); + memcpy(&buf[16], dev->t10_wwn.model, + strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN)); + memcpy(&buf[32], dev->t10_wwn.revision, + strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN)); + + /* + * Set the VERSION DESCRIPTOR fields + */ + put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]); + put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]); + put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]); + if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK) + put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]); + + buf[4] = 91; /* Set additional length to 91 */ + + return 0; +} +EXPORT_SYMBOL(spc_emulate_inquiry_std); + +/* unit serial number */ +static sense_reason_t +spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + u16 len; + + if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { + len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); + len++; /* Extra Byte for NULL Terminator */ + buf[3] = len; + } + return 0; +} + +/* + * Generate NAA IEEE Registered Extended designator + */ +void spc_gen_naa_6h_vendor_specific(struct se_device *dev, + unsigned char *buf) +{ + unsigned char *p = &dev->t10_wwn.unit_serial[0]; + u32 company_id = dev->t10_wwn.company_id; + int cnt, off = 0; + bool next = true; + + /* + * Start NAA IEEE Registered Extended Identifier/Designator + */ + buf[off] = 0x6 << 4; + + /* IEEE COMPANY_ID */ + buf[off++] |= (company_id >> 20) & 0xf; + buf[off++] = (company_id >> 12) & 0xff; + buf[off++] = (company_id >> 4) & 0xff; + buf[off] = (company_id & 0xf) << 4; + + /* + * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on + * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field + * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION + * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL + * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure + * per device uniqeness. + */ + for (cnt = off + 13; *p && off < cnt; p++) { + int val = hex_to_bin(*p); + + if (val < 0) + continue; + + if (next) { + next = false; + buf[off++] |= val; + } else { + next = true; + buf[off] = val << 4; + } + } +} + +/* + * Device identification VPD, for a complete list of + * DESIGNATOR TYPEs see spc4r17 Table 459. + */ +sense_reason_t +spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + struct se_lun *lun = cmd->se_lun; + struct se_portal_group *tpg = NULL; + struct t10_alua_lu_gp_member *lu_gp_mem; + struct t10_alua_tg_pt_gp *tg_pt_gp; + unsigned char *prod = &dev->t10_wwn.model[0]; + u32 off = 0; + u16 len = 0, id_len; + + off = 4; + + /* + * NAA IEEE Registered Extended Assigned designator format, see + * spc4r17 section 7.7.3.6.5 + * + * We depend upon a target_core_mod/ConfigFS provided + * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial + * value in order to return the NAA id. 
+ */ + if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)) + goto check_t10_vend_desc; + + /* CODE SET == Binary */ + buf[off++] = 0x1; + + /* Set ASSOCIATION == addressed logical unit: 0)b */ + buf[off] = 0x00; + + /* Identifier/Designator type == NAA identifier */ + buf[off++] |= 0x3; + off++; + + /* Identifier/Designator length */ + buf[off++] = 0x10; + + /* NAA IEEE Registered Extended designator */ + spc_gen_naa_6h_vendor_specific(dev, &buf[off]); + + len = 20; + off = (len + 4); + +check_t10_vend_desc: + /* + * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4 + */ + id_len = 8; /* For Vendor field */ + + if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) + id_len += sprintf(&buf[off+12], "%s:%s", prod, + &dev->t10_wwn.unit_serial[0]); + buf[off] = 0x2; /* ASCII */ + buf[off+1] = 0x1; /* T10 Vendor ID */ + buf[off+2] = 0x0; + /* left align Vendor ID and pad with spaces */ + memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN); + memcpy(&buf[off+4], dev->t10_wwn.vendor, + strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN)); + /* Extra Byte for NULL Terminator */ + id_len++; + /* Identifier Length */ + buf[off+3] = id_len; + /* Header size for Designation descriptor */ + len += (id_len + 4); + off += (id_len + 4); + + if (1) { + struct t10_alua_lu_gp *lu_gp; + u32 padding, scsi_name_len, scsi_target_len; + u16 lu_gp_id = 0; + u16 tg_pt_gp_id = 0; + u16 tpgt; + + tpg = lun->lun_tpg; + /* + * Relative target port identifer, see spc4r17 + * section 7.7.3.7 + * + * Get the PROTOCOL IDENTIFIER as defined by spc4r17 + * section 7.5.1 Table 362 + */ + buf[off] = tpg->proto_id << 4; + buf[off++] |= 0x1; /* CODE SET == Binary */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target port: 01b */ + buf[off] |= 0x10; + /* DESIGNATOR TYPE == Relative target port identifer */ + buf[off++] |= 0x4; + off++; /* Skip over Reserved */ + buf[off++] = 4; /* DESIGNATOR LENGTH */ + /* Skip over Obsolete field in RTPI payload + * in Table 472 */ + off += 2; + put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]); + off += 2; + len += 8; /* Header size + Designation descriptor */ + /* + * Target port group identifier, see spc4r17 + * section 7.7.3.8 + * + * Get the PROTOCOL IDENTIFIER as defined by spc4r17 + * section 7.5.1 Table 362 + */ + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); + if (!tg_pt_gp) { + rcu_read_unlock(); + goto check_lu_gp; + } + tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; + rcu_read_unlock(); + + buf[off] = tpg->proto_id << 4; + buf[off++] |= 0x1; /* CODE SET == Binary */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target port: 01b */ + buf[off] |= 0x10; + /* DESIGNATOR TYPE == Target port group identifier */ + buf[off++] |= 0x5; + off++; /* Skip over Reserved */ + buf[off++] = 4; /* DESIGNATOR LENGTH */ + off += 2; /* Skip over Reserved Field */ + put_unaligned_be16(tg_pt_gp_id, &buf[off]); + off += 2; + len += 8; /* Header size + Designation descriptor */ + /* + * Logical Unit Group identifier, see spc4r17 + * section 7.7.3.8 + */ +check_lu_gp: + lu_gp_mem = dev->dev_alua_lu_gp_mem; + if (!lu_gp_mem) + goto check_scsi_name; + + spin_lock(&lu_gp_mem->lu_gp_mem_lock); + lu_gp = lu_gp_mem->lu_gp; + if (!lu_gp) { + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + goto check_scsi_name; + } + lu_gp_id = lu_gp->lu_gp_id; + spin_unlock(&lu_gp_mem->lu_gp_mem_lock); + + buf[off++] |= 0x1; /* CODE SET == Binary */ + /* DESIGNATOR TYPE == Logical Unit Group identifier */ + buf[off++] |= 0x6; + off++; /* Skip over Reserved */ + buf[off++] = 4; /* DESIGNATOR LENGTH */ + 
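/*
 * Editor's note -- illustrative sketch, not part of the patch: every
 * designation descriptor emitted into this VPD 0x83 payload starts
 * with the same 4-byte header, so the bit-twiddling above amounts to
 * filling in the fields below.  The matching decode:
 *
 *	#include <stdint.h>
 *
 *	struct sketch_desig_hdr {
 *		uint8_t proto_id;  byte 0 bits 7..4, valid only if PIV is set
 *		uint8_t code_set;  byte 0 bits 3..0: 1 binary, 2 ASCII, 3 UTF-8
 *		uint8_t piv;       byte 1 bit 7
 *		uint8_t assoc;     byte 1 bits 5..4: 0 LU, 1 target port,
 *		                   2 target device
 *		uint8_t type;      byte 1 bits 3..0: 3 NAA, 4 rel. target port,
 *		                   5 target port group, 6 LU group, 8 SCSI name
 *		uint8_t len;       byte 3: length of the body that follows
 *	};
 *
 *	static void sketch_parse_desig_hdr(const uint8_t *d,
 *					   struct sketch_desig_hdr *h)
 *	{
 *		h->proto_id = d[0] >> 4;
 *		h->code_set = d[0] & 0x0f;
 *		h->piv      = (d[1] >> 7) & 0x1;
 *		h->assoc    = (d[1] >> 4) & 0x3;
 *		h->type     = d[1] & 0x0f;
 *		h->len      = d[3];
 *	}
 */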
off += 2; /* Skip over Reserved Field */ + put_unaligned_be16(lu_gp_id, &buf[off]); + off += 2; + len += 8; /* Header size + Designation descriptor */ + /* + * SCSI name string designator, see spc4r17 + * section 7.7.3.11 + * + * Get the PROTOCOL IDENTIFIER as defined by spc4r17 + * section 7.5.1 Table 362 + */ +check_scsi_name: + buf[off] = tpg->proto_id << 4; + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target port: 01b */ + buf[off] |= 0x10; + /* DESIGNATOR TYPE == SCSI name string */ + buf[off++] |= 0x8; + off += 2; /* Skip over Reserved and length */ + /* + * SCSI name string identifer containing, $FABRIC_MOD + * dependent information. For LIO-Target and iSCSI + * Target Port, this means "<iSCSI name>,t,0x<TPGT> in + * UTF-8 encoding. + */ + tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); + scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", + tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt); + scsi_name_len += 1 /* Include NULL terminator */; + /* + * The null-terminated, null-padded (see 4.4.2) SCSI + * NAME STRING field contains a UTF-8 format string. + * The number of bytes in the SCSI NAME STRING field + * (i.e., the value in the DESIGNATOR LENGTH field) + * shall be no larger than 256 and shall be a multiple + * of four. + */ + padding = ((-scsi_name_len) & 3); + if (padding) + scsi_name_len += padding; + if (scsi_name_len > 256) + scsi_name_len = 256; + + buf[off-1] = scsi_name_len; + off += scsi_name_len; + /* Header size + Designation descriptor */ + len += (scsi_name_len + 4); + + /* + * Target device designator + */ + buf[off] = tpg->proto_id << 4; + buf[off++] |= 0x3; /* CODE SET == UTF-8 */ + buf[off] = 0x80; /* Set PIV=1 */ + /* Set ASSOCIATION == target device: 10b */ + buf[off] |= 0x20; + /* DESIGNATOR TYPE == SCSI name string */ + buf[off++] |= 0x8; + off += 2; /* Skip over Reserved and length */ + /* + * SCSI name string identifer containing, $FABRIC_MOD + * dependent information. For LIO-Target and iSCSI + * Target Port, this means "<iSCSI name>" in + * UTF-8 encoding. + */ + scsi_target_len = sprintf(&buf[off], "%s", + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + scsi_target_len += 1 /* Include NULL terminator */; + /* + * The null-terminated, null-padded (see 4.4.2) SCSI + * NAME STRING field contains a UTF-8 format string. + * The number of bytes in the SCSI NAME STRING field + * (i.e., the value in the DESIGNATOR LENGTH field) + * shall be no larger than 256 and shall be a multiple + * of four. + */ + padding = ((-scsi_target_len) & 3); + if (padding) + scsi_target_len += padding; + if (scsi_target_len > 256) + scsi_target_len = 256; + + buf[off-1] = scsi_target_len; + off += scsi_target_len; + + /* Header size + Designation descriptor */ + len += (scsi_target_len + 4); + } + put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */ + return 0; +} +EXPORT_SYMBOL(spc_emulate_evpd_83); + +/* Extended INQUIRY Data VPD Page */ +static sense_reason_t +spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + + buf[3] = 0x3c; + /* + * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK + * only for TYPE3 protection. 
+ */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT || + cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) + buf[4] = 0x5; + else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || + cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) + buf[4] = 0x4; + } + + /* logical unit supports type 1 and type 3 protection */ + if ((dev->transport->get_device_type(dev) == TYPE_DISK) && + (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) && + (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) { + buf[4] |= (0x3 << 3); + } + + /* Set HEADSUP, ORDSUP, SIMPSUP */ + buf[5] = 0x07; + + /* If WriteCache emulation is enabled, set V_SUP */ + if (target_check_wce(dev)) + buf[6] = 0x01; + /* If an LBA map is present set R_SUP */ + spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); + if (!list_empty(&dev->t10_alua.lba_map_list)) + buf[8] = 0x10; + spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock); + return 0; +} + +/* Block Limits VPD page */ +static sense_reason_t +spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + u32 mtl = 0; + int have_tp = 0, opt, min; + u32 io_max_blocks; + + /* + * Following spc3r22 section 6.5.3 Block Limits VPD page, when + * emulate_tpu=1 or emulate_tpws=1 we will be expect a + * different page length for Thin Provisioning. + */ + if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) + have_tp = 1; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = have_tp ? 0x3c : 0x10; + + /* Set WSNZ to 1 */ + buf[4] = 0x01; + /* + * Set MAXIMUM COMPARE AND WRITE LENGTH + */ + if (dev->dev_attrib.emulate_caw) + buf[5] = 0x01; + + /* + * Set OPTIMAL TRANSFER LENGTH GRANULARITY + */ + if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev))) + put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]); + else + put_unaligned_be16(1, &buf[6]); + + /* + * Set MAXIMUM TRANSFER LENGTH + * + * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics + * enforcing maximum HW scatter-gather-list entry limit + */ + if (cmd->se_tfo->max_data_sg_nents) { + mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) / + dev->dev_attrib.block_size; + } + io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors, + dev->dev_attrib.hw_block_size, + dev->dev_attrib.block_size); + put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]); + + /* + * Set OPTIMAL TRANSFER LENGTH + */ + if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev))) + put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]); + else + put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); + + /* + * Exit now if we don't support TP. 
+ */ + if (!have_tp) + goto max_write_same; + + /* + * Set MAXIMUM UNMAP LBA COUNT + */ + put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]); + + /* + * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT + */ + put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count, + &buf[24]); + + /* + * Set OPTIMAL UNMAP GRANULARITY + */ + put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]); + + /* + * UNMAP GRANULARITY ALIGNMENT + */ + put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment, + &buf[32]); + if (dev->dev_attrib.unmap_granularity_alignment != 0) + buf[32] |= 0x80; /* Set the UGAVALID bit */ + + /* + * MAXIMUM WRITE SAME LENGTH + */ +max_write_same: + put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]); + + return 0; +} + +/* Block Device Characteristics VPD page */ +static sense_reason_t +spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x3c; + buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0; + + return 0; +} + +/* Thin Provisioning VPD */ +static sense_reason_t +spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + /* + * From spc3r22 section 6.5.4 Thin Provisioning VPD page: + * + * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to + * zero, then the page length shall be set to 0004h. If the DP bit + * is set to one, then the page length shall be set to the value + * defined in table 162. + */ + buf[0] = dev->transport->get_device_type(dev); + + /* + * Set Hardcoded length mentioned above for DP=0 + */ + put_unaligned_be16(0x0004, &buf[2]); + + /* + * The THRESHOLD EXPONENT field indicates the threshold set size in + * LBAs as a power of 2 (i.e., the threshold set size is equal to + * 2(threshold exponent)). + * + * Note that this is currently set to 0x00 as mkp says it will be + * changing again. We can enable this once it has settled in T10 + * and is actually used by Linux/SCSI ML code. + */ + buf[4] = 0x00; + + /* + * A TPU bit set to one indicates that the device server supports + * the UNMAP command (see 5.25). A TPU bit set to zero indicates + * that the device server does not support the UNMAP command. + */ + if (dev->dev_attrib.emulate_tpu != 0) + buf[5] = 0x80; + + /* + * A TPWS bit set to one indicates that the device server supports + * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs. + * A TPWS bit set to zero indicates that the device server does not + * support the use of the WRITE SAME (16) command to unmap LBAs. + */ + if (dev->dev_attrib.emulate_tpws != 0) + buf[5] |= 0x40 | 0x20; + + /* + * The unmap_zeroes_data set means that the underlying device supports + * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This + * satisfies the SBC requirements for LBPRZ, meaning that a subsequent + * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA + * See sbc4r36 6.6.4. 
+ */ + if (((dev->dev_attrib.emulate_tpu != 0) || + (dev->dev_attrib.emulate_tpws != 0)) && + (dev->dev_attrib.unmap_zeroes_data != 0)) + buf[5] |= 0x04; + + return 0; +} + +/* Referrals VPD page */ +static sense_reason_t +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x0c; + put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); + put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]); + + return 0; +} + +static sense_reason_t +spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); + +static struct { + uint8_t page; + sense_reason_t (*emulate)(struct se_cmd *, unsigned char *); +} evpd_handlers[] = { + { .page = 0x00, .emulate = spc_emulate_evpd_00 }, + { .page = 0x80, .emulate = spc_emulate_evpd_80 }, + { .page = 0x83, .emulate = spc_emulate_evpd_83 }, + { .page = 0x86, .emulate = spc_emulate_evpd_86 }, + { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, + { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, + { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, + { .page = 0xb3, .emulate = spc_emulate_evpd_b3 }, +}; + +/* supported vital product data pages */ +static sense_reason_t +spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) +{ + int p; + + /* + * Only report the INQUIRY EVPD=1 pages after a valid NAA + * Registered Extended LUN WWN has been set via ConfigFS + * during device creation/restart. + */ + if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { + buf[3] = ARRAY_SIZE(evpd_handlers); + for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) + buf[p + 4] = evpd_handlers[p].page; + } + + return 0; +} + +static sense_reason_t +spc_emulate_inquiry(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *rbuf; + unsigned char *cdb = cmd->t_task_cdb; + unsigned char *buf; + sense_reason_t ret; + int p; + int len = 0; + + buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate response buffer for INQUIRY\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + buf[0] = dev->transport->get_device_type(dev); + + if (!(cdb[1] & 0x1)) { + if (cdb[2]) { + pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n", + cdb[2]); + ret = TCM_INVALID_CDB_FIELD; + goto out; + } + + ret = spc_emulate_inquiry_std(cmd, buf); + len = buf[4] + 5; + goto out; + } + + for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) { + if (cdb[2] == evpd_handlers[p].page) { + buf[1] = cdb[2]; + ret = evpd_handlers[p].emulate(cmd, buf); + len = get_unaligned_be16(&buf[2]) + 4; + goto out; + } + } + + pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]); + ret = TCM_INVALID_CDB_FIELD; + +out: + rbuf = transport_kmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + kfree(buf); + + if (!ret) + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len); + return ret; +} + +static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p) +{ + p[0] = 0x01; + p[1] = 0x0a; + + /* No changeable values for now */ + if (pc == 1) + goto out; + +out: + return 12; +} + +static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p) +{ + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + + p[0] = 0x0a; + p[1] = 0x0a; + + /* No changeable values for now */ + if (pc == 1) + goto out; + + /* GLTSD: No implicit save of log parameters */ + p[2] = (1 << 1); + if (target_sense_desc_format(dev)) + /* D_SENSE: Descriptor format 
sense data for 64bit sectors */ + p[2] |= (1 << 2); + + /* + * From spc4r23, 7.4.7 Control mode page + * + * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies + * restrictions on the algorithm used for reordering commands + * having the SIMPLE task attribute (see SAM-4). + * + * Table 368 -- QUEUE ALGORITHM MODIFIER field + * Code Description + * 0h Restricted reordering + * 1h Unrestricted reordering allowed + * 2h to 7h Reserved + * 8h to Fh Vendor specific + * + * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that + * the device server shall order the processing sequence of commands + * having the SIMPLE task attribute such that data integrity is maintained + * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol + * requests is halted at any time, the final value of all data observable + * on the medium shall be the same as if all the commands had been processed + * with the ORDERED task attribute). + * + * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the + * device server may reorder the processing sequence of commands having the + * SIMPLE task attribute in any manner. Any data integrity exposures related to + * command sequence order shall be explicitly handled by the application client + * through the selection of appropriate ommands and task attributes. + */ + p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; + /* + * From spc4r17, section 7.4.6 Control mode Page + * + * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b + * + * 00b: The logical unit shall clear any unit attention condition + * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION + * status and shall not establish a unit attention condition when a com- + * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT + * status. + * + * 10b: The logical unit shall not clear any unit attention condition + * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION + * status and shall not establish a unit attention condition when + * a command is completed with BUSY, TASK SET FULL, or RESERVATION + * CONFLICT status. + * + * 11b a The logical unit shall not clear any unit attention condition + * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION + * status and shall establish a unit attention condition for the + * initiator port associated with the I_T nexus on which the BUSY, + * TASK SET FULL, or RESERVATION CONFLICT status is being returned. + * Depending on the status, the additional sense code shall be set to + * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS + * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE + * command, a unit attention condition shall be established only once + * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless + * to the number of commands completed with one of those status codes. + */ + switch (dev->dev_attrib.emulate_ua_intlck_ctrl) { + case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA: + p[4] = 0x30; + break; + case TARGET_UA_INTLCK_CTRL_NO_CLEAR: + p[4] = 0x20; + break; + default: /* TARGET_UA_INTLCK_CTRL_CLEAR */ + p[4] = 0x00; + break; + } + /* + * From spc4r17, section 7.4.6 Control mode Page + * + * Task Aborted Status (TAS) bit set to zero. + * + * A task aborted status (TAS) bit set to zero specifies that aborted + * tasks shall be terminated by the device server without any response + * to the application client. 
A TAS bit set to one specifies that tasks + * aborted by the actions of an I_T nexus other than the I_T nexus on + * which the command was received shall be completed with TASK ABORTED + * status (see SAM-4). + */ + p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; + /* + * From spc4r30, section 7.5.7 Control mode page + * + * Application Tag Owner (ATO) bit set to one. + * + * If the ATO bit is set to one the device server shall not modify the + * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection + * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE + * TAG field. + */ + if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { + if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type) + p[5] |= 0x80; + } + + p[8] = 0xff; + p[9] = 0xff; + p[11] = 30; + +out: + return 12; +} + +static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) +{ + struct se_device *dev = cmd->se_dev; + + p[0] = 0x08; + p[1] = 0x12; + + /* No changeable values for now */ + if (pc == 1) + goto out; + + if (target_check_wce(dev)) + p[2] = 0x04; /* Write Cache Enable */ + p[12] = 0x20; /* Disabled Read Ahead */ + +out: + return 20; +} + +static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p) +{ + p[0] = 0x1c; + p[1] = 0x0a; + + /* No changeable values for now */ + if (pc == 1) + goto out; + +out: + return 12; +} + +static struct { + uint8_t page; + uint8_t subpage; + int (*emulate)(struct se_cmd *, u8, unsigned char *); +} modesense_handlers[] = { + { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery }, + { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching }, + { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control }, + { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions }, +}; + +static void spc_modesense_write_protect(unsigned char *buf, int type) +{ + /* + * I believe that the WP bit (bit 7) in the mode header is the same for + * all device types.. + */ + switch (type) { + case TYPE_DISK: + case TYPE_TAPE: + default: + buf[0] |= 0x80; /* WP bit */ + break; + } +} + +static void spc_modesense_dpofua(unsigned char *buf, int type) +{ + switch (type) { + case TYPE_DISK: + buf[0] |= 0x10; /* DPOFUA bit */ + break; + default: + break; + } +} + +static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size) +{ + *buf++ = 8; + put_unaligned_be32(min(blocks, 0xffffffffull), buf); + buf += 4; + put_unaligned_be32(block_size, buf); + return 9; +} + +static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size) +{ + if (blocks <= 0xffffffff) + return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3; + + *buf++ = 1; /* LONGLBA */ + buf += 2; + *buf++ = 16; + put_unaligned_be64(blocks, buf); + buf += 12; + put_unaligned_be32(block_size, buf); + + return 17; +} + +static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + char *cdb = cmd->t_task_cdb; + unsigned char buf[SE_MODE_PAGE_BUF], *rbuf; + int type = dev->transport->get_device_type(dev); + int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); + bool dbd = !!(cdb[1] & 0x08); + bool llba = ten ? !!(cdb[1] & 0x10) : false; + u8 pc = cdb[2] >> 6; + u8 page = cdb[2] & 0x3f; + u8 subpage = cdb[3]; + int length = 0; + int ret; + int i; + + memset(buf, 0, SE_MODE_PAGE_BUF); + + /* + * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for + * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). 
+ */ + length = ten ? 3 : 2; + + /* DEVICE-SPECIFIC PARAMETER */ + if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd)) + spc_modesense_write_protect(&buf[length], type); + + /* + * SBC only allows us to enable FUA and DPO together. Fortunately + * DPO is explicitly specified as a hint, so a noop is a perfectly + * valid implementation. + */ + if (target_check_fua(dev)) + spc_modesense_dpofua(&buf[length], type); + + ++length; + + /* BLOCK DESCRIPTOR */ + + /* + * For now we only include a block descriptor for disk (SBC) + * devices; other command sets use a slightly different format. + */ + if (!dbd && type == TYPE_DISK) { + u64 blocks = dev->transport->get_blocks(dev); + u32 block_size = dev->dev_attrib.block_size; + + if (ten) { + if (llba) { + length += spc_modesense_long_blockdesc(&buf[length], + blocks, block_size); + } else { + length += 3; + length += spc_modesense_blockdesc(&buf[length], + blocks, block_size); + } + } else { + length += spc_modesense_blockdesc(&buf[length], blocks, + block_size); + } + } else { + if (ten) + length += 4; + else + length += 1; + } + + if (page == 0x3f) { + if (subpage != 0x00 && subpage != 0xff) { + pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); + return TCM_INVALID_CDB_FIELD; + } + + for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) { + /* + * Tricky way to say all subpage 00h for + * subpage==0, all subpages for subpage==0xff + * (and we just checked above that those are + * the only two possibilities). + */ + if ((modesense_handlers[i].subpage & ~subpage) == 0) { + ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]); + if (!ten && length + ret >= 255) + break; + length += ret; + } + } + + goto set_length; + } + + for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) + if (modesense_handlers[i].page == page && + modesense_handlers[i].subpage == subpage) { + length += modesense_handlers[i].emulate(cmd, pc, &buf[length]); + goto set_length; + } + + /* + * We don't intend to implement: + * - obsolete page 03h "format parameters" (checked by Solaris) + */ + if (page != 0x03) + pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", + page, subpage); + + return TCM_UNKNOWN_MODE_PAGE; + +set_length: + if (ten) + put_unaligned_be16(length - 2, buf); + else + buf[0] = length - 1; + + rbuf = transport_kmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length); + return 0; +} + +static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) +{ + char *cdb = cmd->t_task_cdb; + bool ten = cdb[0] == MODE_SELECT_10; + int off = ten ? 8 : 4; + bool pf = !!(cdb[1] & 0x10); + u8 page, subpage; + unsigned char *buf; + unsigned char tbuf[SE_MODE_PAGE_BUF]; + int length; + sense_reason_t ret = 0; + int i; + + if (!cmd->data_length) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + + if (cmd->data_length < off + 2) + return TCM_PARAMETER_LIST_LENGTH_ERROR; + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + if (!pf) { + ret = TCM_INVALID_CDB_FIELD; + goto out; + } + + page = buf[off] & 0x3f; + subpage = buf[off] & 0x40 ? 
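/*
 * Editor's note -- illustrative sketch, not part of the patch: the
 * "length = ten ? 3 : 2" arithmetic in spc_emulate_modesense() above
 * follows from the two mode parameter header layouts:
 *
 *	MODE SENSE (6), 4-byte header:
 *	  [0]    MODE DATA LENGTH (bytes following, i.e. n - 1)
 *	  [1]    MEDIUM TYPE
 *	  [2]    DEVICE-SPECIFIC PARAMETER (WP 0x80, DPOFUA 0x10)
 *	  [3]    BLOCK DESCRIPTOR LENGTH
 *
 *	MODE SENSE (10), 8-byte header:
 *	  [0..1] MODE DATA LENGTH (big-endian, n - 2)
 *	  [2]    MEDIUM TYPE
 *	  [3]    DEVICE-SPECIFIC PARAMETER
 *	  [4]    LONGLBA in bit 0
 *	  [6..7] BLOCK DESCRIPTOR LENGTH (big-endian)
 *
 * so the device-specific parameter byte sits at offset 2 or 3:
 *
 *	#include <stdint.h>
 *
 *	static void sketch_set_devspec(uint8_t *hdr, int ten,
 *				       int wp, int fua_supported)
 *	{
 *		int off = ten ? 3 : 2;
 *
 *		if (wp)
 *			hdr[off] |= 0x80;
 *		if (fua_supported)
 *			hdr[off] |= 0x10;
 *	}
 */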
buf[off + 1] : 0; + + for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) + if (modesense_handlers[i].page == page && + modesense_handlers[i].subpage == subpage) { + memset(tbuf, 0, SE_MODE_PAGE_BUF); + length = modesense_handlers[i].emulate(cmd, 0, tbuf); + goto check_contents; + } + + ret = TCM_UNKNOWN_MODE_PAGE; + goto out; + +check_contents: + if (cmd->data_length < off + length) { + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } + + if (memcmp(buf + off, tbuf, length)) + ret = TCM_INVALID_PARAMETER_LIST; + +out: + transport_kunmap_data_sg(cmd); + + if (!ret) + target_complete_cmd(cmd, SAM_STAT_GOOD); + return ret; +} + +static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd) +{ + unsigned char *cdb = cmd->t_task_cdb; + unsigned char *rbuf; + u8 ua_asc = 0, ua_ascq = 0; + unsigned char buf[SE_SENSE_BUF]; + bool desc_format = target_sense_desc_format(cmd->se_dev); + + memset(buf, 0, SE_SENSE_BUF); + + if (cdb[1] & 0x01) { + pr_err("REQUEST_SENSE description emulation not" + " supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + rbuf = transport_kmap_data_sg(cmd); + if (!rbuf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) + scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION, + ua_asc, ua_ascq); + else + scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0); + + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) +{ + struct se_dev_entry *deve; + struct se_session *sess = cmd->se_sess; + struct se_node_acl *nacl; + struct scsi_lun slun; + unsigned char *buf; + u32 lun_count = 0, offset = 8; + __be32 len; + + buf = transport_kmap_data_sg(cmd); + if (cmd->data_length && !buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + /* + * If no struct se_session pointer is present, this struct se_cmd is + * coming via a target_core_mod PASSTHROUGH op, and not through + * a $FABRIC_MOD. In that case, report LUN=0 only. + */ + if (!sess) + goto done; + + nacl = sess->se_node_acl; + + rcu_read_lock(); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { + /* + * We determine the correct LUN LIST LENGTH even once we + * have reached the initial allocation length. + * See SPC2-R20 7.19. + */ + lun_count++; + if (offset >= cmd->data_length) + continue; + + int_to_scsilun(deve->mapped_lun, &slun); + memcpy(buf + offset, &slun, + min(8u, cmd->data_length - offset)); + offset += 8; + } + rcu_read_unlock(); + + /* + * See SPC3 r07, page 159. + */ +done: + /* + * If no LUNs are accessible, report virtual LUN 0. 
+ */ + if (lun_count == 0) { + int_to_scsilun(0, &slun); + if (cmd->data_length > 8) + memcpy(buf + offset, &slun, + min(8u, cmd->data_length - offset)); + lun_count = 1; + } + + if (buf) { + len = cpu_to_be32(lun_count * 8); + memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8); + return 0; +} +EXPORT_SYMBOL(spc_emulate_report_luns); + +static sense_reason_t +spc_emulate_testunitready(struct se_cmd *cmd) +{ + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; +} + +static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev) +{ + if (!target_check_fua(dev)) + usage_bits[1] &= ~0x18; + else + usage_bits[1] |= 0x18; +} + +static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev) +{ + if (!target_check_fua(dev)) + usage_bits[10] &= ~0x18; + else + usage_bits[10] |= 0x18; +} + +static struct target_opcode_descriptor tcm_opcode_read6 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_6, + .cdb_size = 6, + .usage_bits = {READ_6, 0x1f, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_read10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_10, + .cdb_size = 10, + .usage_bits = {READ_10, 0xf8, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_read12 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_12, + .cdb_size = 12, + .usage_bits = {READ_12, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_read16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_16, + .cdb_size = 16, + .usage_bits = {READ_16, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write6 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_6, + .cdb_size = 6, + .usage_bits = {WRITE_6, 0x1f, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_write10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_10, + .cdb_size = 10, + .usage_bits = {WRITE_10, 0xf8, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write_verify10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_VERIFY, + .cdb_size = 10, + .usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write12 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_12, + .cdb_size = 12, + .usage_bits = {WRITE_12, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_16, + .cdb_size = 16, + .usage_bits = {WRITE_16, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + 
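/*
 * Editor's note -- illustrative sketch, not part of the patch: these
 * usage_bits arrays are the CDB USAGE DATA that REPORT SUPPORTED
 * OPERATION CODES returns for one opcode: a template as long as the
 * CDB in which every bit the device server evaluates is set (0xff
 * means the whole byte is used; SCSI_CONTROL_MASK and
 * SCSI_GROUP_NUMBER_MASK cover the CONTROL and GROUP NUMBER fields).
 * A client can AND a candidate CDB against the template:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static int sketch_cdb_fits_usage(const uint8_t *cdb,
 *					 const uint8_t *usage,
 *					 size_t cdb_len)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < cdb_len; i++)
 *			if (cdb[i] & ~usage[i])
 *				return 0;   sets a bit the target ignores
 *		return 1;
 *	}
 */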
.update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_write_verify16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_VERIFY_16, + .cdb_size = 16, + .usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + struct se_device *dev = cmd->se_dev; + + return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) || + !!ops->execute_write_same; +} + +static struct target_opcode_descriptor tcm_opcode_write_same32 = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = VARIABLE_LENGTH_CMD, + .service_action = WRITE_SAME_32, + .cdb_size = 32, + .usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00, + 0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18, + 0x00, WRITE_SAME_32, 0xe8, 0x00, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff}, + .enabled = tcm_is_ws_enabled, + .update_usage_bits = set_dpofua_usage_bits32, +}; + +static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_caw; +} + +static struct target_opcode_descriptor tcm_opcode_compare_write = { + .support = SCSI_SUPPORT_FULL, + .opcode = COMPARE_AND_WRITE, + .cdb_size = 16, + .usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, 0x00, + 0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .enabled = tcm_is_caw_enabled, + .update_usage_bits = set_dpofua_usage_bits, +}; + +static struct target_opcode_descriptor tcm_opcode_read_capacity = { + .support = SCSI_SUPPORT_FULL, + .opcode = READ_CAPACITY, + .cdb_size = 10, + .usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, 0x00, + 0x01, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_read_capacity16 = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = SERVICE_ACTION_IN_16, + .service_action = SAI_READ_CAPACITY_16, + .cdb_size = 16, + .usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + spin_lock(&dev->t10_alua.lba_map_lock); + if (list_empty(&dev->t10_alua.lba_map_list)) { + spin_unlock(&dev->t10_alua.lba_map_lock); + return false; + } + spin_unlock(&dev->t10_alua.lba_map_lock); + return true; +} + +static struct target_opcode_descriptor tcm_opcode_read_report_refferals = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = SERVICE_ACTION_IN_16, + .service_action = SAI_REPORT_REFERRALS, + .cdb_size = 16, + .usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_rep_ref_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_sync_cache = { + .support = SCSI_SUPPORT_FULL, + .opcode = SYNCHRONIZE_CACHE, + .cdb_size = 10, + .usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 
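/*
 * Editor's note -- illustrative sketch, not part of the patch:
 * descriptors carry an optional ->enabled() callback
 * (tcm_is_ws_enabled, tcm_is_caw_enabled, tcm_is_unmap_enabled, ...)
 * so that REPORT SUPPORTED OPERATION CODES only advertises opcodes the
 * current device configuration will accept.  The filtering pattern,
 * reduced to a self-contained sketch in which a missing callback means
 * always enabled:
 *
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *
 *	struct sketch_descr {
 *		unsigned char opcode;
 *		bool (*enabled)(const struct sketch_descr *d, void *dev);
 *	};
 *
 *	static size_t sketch_filter(const struct sketch_descr * const *tbl,
 *				    size_t n, void *dev,
 *				    const struct sketch_descr **out)
 *	{
 *		size_t i, cnt = 0;
 *
 *		for (i = 0; i < n; i++)
 *			if (!tbl[i]->enabled || tbl[i]->enabled(tbl[i], dev))
 *				out[cnt++] = tbl[i];
 *		return cnt;
 *	}
 */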
0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_sync_cache16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = SYNCHRONIZE_CACHE_16, + .cdb_size = 16, + .usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + struct se_device *dev = cmd->se_dev; + + return ops->execute_unmap && dev->dev_attrib.emulate_tpu; +} + +static struct target_opcode_descriptor tcm_opcode_unmap = { + .support = SCSI_SUPPORT_FULL, + .opcode = UNMAP, + .cdb_size = 10, + .usage_bits = {UNMAP, 0x00, 0x00, 0x00, + 0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_unmap_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_write_same = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_SAME, + .cdb_size = 10, + .usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_ws_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_write_same16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = WRITE_SAME_16, + .cdb_size = 16, + .usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, + .enabled = tcm_is_ws_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_verify = { + .support = SCSI_SUPPORT_FULL, + .opcode = VERIFY, + .cdb_size = 10, + .usage_bits = {VERIFY, 0x00, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_verify16 = { + .support = SCSI_SUPPORT_FULL, + .opcode = VERIFY_16, + .cdb_size = 16, + .usage_bits = {VERIFY_16, 0x00, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_start_stop = { + .support = SCSI_SUPPORT_FULL, + .opcode = START_STOP, + .cdb_size = 6, + .usage_bits = {START_STOP, 0x01, 0x00, 0x00, + 0x01, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_select = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SELECT, + .cdb_size = 6, + .usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_select10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SELECT_10, + .cdb_size = 10, + .usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_sense = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SENSE, + .cdb_size = 6, + .usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_mode_sense10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = MODE_SENSE_10, + .cdb_size = 10, + .usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_pri_read_keys = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_READ_KEYS, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00, + 0x00, 
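/*
 * Editor's note -- illustrative sketch, not part of the patch: the
 * PERSISTENT_RESERVE_IN descriptors in this table all describe one
 * 10-byte CDB shape, which the usage_bits spell out: the opcode, a
 * 5-bit SERVICE ACTION in byte 1, and a big-endian ALLOCATION LENGTH
 * in bytes 7..8.  Building such a CDB on the initiator side:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void sketch_build_pr_in(uint8_t cdb[10], uint8_t sa,
 *				       uint16_t alloc_len)
 *	{
 *		memset(cdb, 0, 10);
 *		cdb[0] = 0x5e;          PERSISTENT_RESERVE_IN
 *		cdb[1] = sa & 0x1f;     e.g. PRI_READ_KEYS == 0x00
 *		cdb[7] = alloc_len >> 8;
 *		cdb[8] = alloc_len & 0xff;
 *	}
 */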
0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_READ_RESERVATION, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + if (!dev->dev_attrib.emulate_pr) + return false; + + if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) + return true; + + switch (descr->opcode) { + case RESERVE: + case RESERVE_10: + case RELEASE: + case RELEASE_10: + /* + * The pr_ops which are used by the backend modules don't + * support these commands. + */ + return false; + case PERSISTENT_RESERVE_OUT: + switch (descr->service_action) { + case PRO_REGISTER_AND_MOVE: + case PRO_REPLACE_LOST_RESERVATION: + /* + * The backend modules don't have access to ports and + * I_T nexuses so they can't handle these type of + * requests. + */ + return false; + } + break; + case PERSISTENT_RESERVE_IN: + if (descr->service_action == PRI_READ_FULL_STATUS) + return false; + break; + } + + return true; +} + +static struct target_opcode_descriptor tcm_opcode_pri_read_caps = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_REPORT_CAPABILITIES, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_IN, + .service_action = PRI_READ_FULL_STATUS, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_register = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_REGISTER, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_reserve = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_RESERVE, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_release = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_RELEASE, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_clear = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_CLEAR, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled 
= tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_preempt = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_PREEMPT, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_PREEMPT_AND_ABORT, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY, + .cdb_size = 10, + .usage_bits = { + PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY, + 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_pro_register_move = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = PERSISTENT_RESERVE_OUT, + .service_action = PRO_REGISTER_AND_MOVE, + .cdb_size = 10, + .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_release = { + .support = SCSI_SUPPORT_FULL, + .opcode = RELEASE, + .cdb_size = 6, + .usage_bits = {RELEASE, 0x00, 0x00, 0x00, + 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_release10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = RELEASE_10, + .cdb_size = 10, + .usage_bits = {RELEASE_10, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_reserve = { + .support = SCSI_SUPPORT_FULL, + .opcode = RESERVE, + .cdb_size = 6, + .usage_bits = {RESERVE, 0x00, 0x00, 0x00, + 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_reserve10 = { + .support = SCSI_SUPPORT_FULL, + .opcode = RESERVE_10, + .cdb_size = 10, + .usage_bits = {RESERVE_10, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, + 0xff, SCSI_CONTROL_MASK}, + .enabled = tcm_is_pr_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_request_sense = { + .support = SCSI_SUPPORT_FULL, + .opcode = REQUEST_SENSE, + .cdb_size = 6, + .usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00, + 0xff, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_inquiry = { + .support = SCSI_SUPPORT_FULL, + .opcode = INQUIRY, + .cdb_size = 6, + .usage_bits = {INQUIRY, 0x01, 0xff, 0xff, + 0xff, SCSI_CONTROL_MASK}, +}; + +static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_3pc; +} + +static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = EXTENDED_COPY, + .cdb_size = 16, + .usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_3pc_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = RECEIVE_COPY_RESULTS, + .service_action = RCR_SA_OPERATING_PARAMETERS, + .cdb_size = 16, + .usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_3pc_enabled, +}; + +static struct target_opcode_descriptor tcm_opcode_report_luns = { + .support = SCSI_SUPPORT_FULL, + .opcode = REPORT_LUNS, + .cdb_size = 12, + .usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_test_unit_ready = { + .support = SCSI_SUPPORT_FULL, + .opcode = TEST_UNIT_READY, + .cdb_size = 6, + .usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00, + 0x00, SCSI_CONTROL_MASK}, +}; + +static struct target_opcode_descriptor tcm_opcode_report_target_pgs = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = MAINTENANCE_IN, + .service_action = MI_REPORT_TARGET_PGS, + .cdb_size = 12, + .usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, +}; + +static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + return dev->dev_attrib.emulate_rsoc; +} + +static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = MAINTENANCE_IN, + .service_action = MI_REPORT_SUPPORTED_OPERATION_CODES, + .cdb_size = 12, + .usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES, + 0x87, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = spc_rsoc_enabled, +}; + +static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) +{ + struct t10_alua_tg_pt_gp *l_tg_pt_gp; + struct se_lun *l_lun = cmd->se_lun; + + rcu_read_lock(); + l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp); + if (!l_tg_pt_gp) { + rcu_read_unlock(); + return false; + } + if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { + rcu_read_unlock(); + return false; + } + rcu_read_unlock(); + + return true; +} + +static struct target_opcode_descriptor tcm_opcode_set_tpg = { + .support = SCSI_SUPPORT_FULL, + .serv_action_valid = 1, + .opcode = MAINTENANCE_OUT, + .service_action = MO_SET_TARGET_PGS, + .cdb_size = 12, + .usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, + .enabled = tcm_is_set_tpg_enabled, +}; + +static struct target_opcode_descriptor *tcm_supported_opcodes[] = { + &tcm_opcode_read6, + &tcm_opcode_read10, + &tcm_opcode_read12, + &tcm_opcode_read16, + &tcm_opcode_write6, + &tcm_opcode_write10, + &tcm_opcode_write_verify10, + &tcm_opcode_write12, + &tcm_opcode_write16, + &tcm_opcode_write_verify16, + &tcm_opcode_write_same32, + &tcm_opcode_compare_write, + &tcm_opcode_read_capacity, + &tcm_opcode_read_capacity16, + &tcm_opcode_read_report_refferals, + &tcm_opcode_sync_cache, + &tcm_opcode_sync_cache16, + &tcm_opcode_unmap, + &tcm_opcode_write_same, + &tcm_opcode_write_same16, + &tcm_opcode_verify, + &tcm_opcode_verify16, + &tcm_opcode_start_stop, + &tcm_opcode_mode_select, + 
&tcm_opcode_mode_select10, + &tcm_opcode_mode_sense, + &tcm_opcode_mode_sense10, + &tcm_opcode_pri_read_keys, + &tcm_opcode_pri_read_resrv, + &tcm_opcode_pri_read_caps, + &tcm_opcode_pri_read_full_status, + &tcm_opcode_pro_register, + &tcm_opcode_pro_reserve, + &tcm_opcode_pro_release, + &tcm_opcode_pro_clear, + &tcm_opcode_pro_preempt, + &tcm_opcode_pro_preempt_abort, + &tcm_opcode_pro_reg_ign_exist, + &tcm_opcode_pro_register_move, + &tcm_opcode_release, + &tcm_opcode_release10, + &tcm_opcode_reserve, + &tcm_opcode_reserve10, + &tcm_opcode_request_sense, + &tcm_opcode_inquiry, + &tcm_opcode_extended_copy_lid1, + &tcm_opcode_rcv_copy_res_op_params, + &tcm_opcode_report_luns, + &tcm_opcode_test_unit_ready, + &tcm_opcode_report_target_pgs, + &tcm_opcode_report_supp_opcodes, + &tcm_opcode_set_tpg, +}; + +static int +spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp, + struct target_opcode_descriptor *descr) +{ + if (!ctdp) + return 0; + + put_unaligned_be16(0xa, buf); + buf[3] = descr->specific_timeout; + put_unaligned_be32(descr->nominal_timeout, &buf[4]); + put_unaligned_be32(descr->recommended_timeout, &buf[8]); + + return 12; +} + +static int +spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp, + struct target_opcode_descriptor *descr) +{ + int td_size = 0; + + buf[0] = descr->opcode; + + put_unaligned_be16(descr->service_action, &buf[2]); + + buf[5] = (ctdp << 1) | descr->serv_action_valid; + put_unaligned_be16(descr->cdb_size, &buf[6]); + + td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp, + descr); + + return 8 + td_size; +} + +static int +spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp, + struct target_opcode_descriptor *descr, + struct se_device *dev) +{ + int td_size = 0; + + if (!descr) { + buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED; + return 2; + } + + buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL; + put_unaligned_be16(descr->cdb_size, &buf[2]); + memcpy(&buf[4], descr->usage_bits, descr->cdb_size); + if (descr->update_usage_bits) + descr->update_usage_bits(&buf[4], dev); + + td_size = spc_rsoc_encode_command_timeouts_descriptor( + &buf[4 + descr->cdb_size], ctdp, descr); + + return 4 + descr->cdb_size + td_size; +} + +static sense_reason_t +spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) +{ + struct target_opcode_descriptor *descr; + struct se_session *sess = cmd->se_sess; + unsigned char *cdb = cmd->t_task_cdb; + u8 opts = cdb[2] & 0x3; + u8 requested_opcode; + u16 requested_sa; + int i; + + requested_opcode = cdb[3]; + requested_sa = ((u16)cdb[4]) << 8 | cdb[5]; + *opcode = NULL; + + if (opts > 3) { + pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES" + " with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n", + cmd->se_tfo->fabric_name, opts, + cmd->se_lun->unpacked_lun, + sess->se_node_acl->initiatorname); + return TCM_INVALID_CDB_FIELD; + } + + for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { + descr = tcm_supported_opcodes[i]; + if (descr->opcode != requested_opcode) + continue; + + switch (opts) { + case 0x1: + /* + * If the REQUESTED OPERATION CODE field specifies an + * operation code for which the device server implements + * service actions, then the device server shall + * terminate the command with CHECK CONDITION status, + * with the sense key set to ILLEGAL REQUEST, and the + * additional sense code set to INVALID FIELD IN CDB + */ + if (descr->serv_action_valid) + return TCM_INVALID_CDB_FIELD; + + if (!descr->enabled || 
descr->enabled(descr, cmd)) + *opcode = descr; + break; + case 0x2: + /* + * If the REQUESTED OPERATION CODE field specifies an + * operation code for which the device server does not + * implement service actions, then the device server + * shall terminate the command with CHECK CONDITION + * status, with the sense key set to ILLEGAL REQUEST, + * and the additional sense code set to INVALID FIELD IN CDB. + */ + if (descr->serv_action_valid && + descr->service_action == requested_sa) { + if (!descr->enabled || descr->enabled(descr, + cmd)) + *opcode = descr; + } else if (!descr->serv_action_valid) + return TCM_INVALID_CDB_FIELD; + break; + case 0x3: + /* + * The command support data for the operation code and + * service action as specified in the REQUESTED OPERATION + * CODE field and REQUESTED SERVICE ACTION field shall + * be returned in the one_command parameter data format. + */ + if (descr->service_action == requested_sa) + if (!descr->enabled || descr->enabled(descr, + cmd)) + *opcode = descr; + break; + } + } + + return 0; +} + +static sense_reason_t +spc_emulate_report_supp_op_codes(struct se_cmd *cmd) +{ + int descr_num = ARRAY_SIZE(tcm_supported_opcodes); + struct target_opcode_descriptor *descr = NULL; + unsigned char *cdb = cmd->t_task_cdb; + u8 rctd = (cdb[2] >> 7) & 0x1; + unsigned char *buf = NULL; + int response_length = 0; + u8 opts = cdb[2] & 0x3; + unsigned char *rbuf; + sense_reason_t ret = 0; + int i; + + if (!cmd->se_dev->dev_attrib.emulate_rsoc) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + rbuf = transport_kmap_data_sg(cmd); + if (cmd->data_length && !rbuf) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + + if (opts == 0) + response_length = 4 + (8 + rctd * 12) * descr_num; + else { + ret = spc_rsoc_get_descr(cmd, &descr); + if (ret) + goto out; + + if (descr) + response_length = 4 + descr->cdb_size + rctd * 12; + else + response_length = 2; + } + + buf = kzalloc(response_length, GFP_KERNEL); + if (!buf) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + response_length = 0; + + if (opts == 0) { + response_length += 4; + + for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { + descr = tcm_supported_opcodes[i]; + if (descr->enabled && !descr->enabled(descr, cmd)) + continue; + + response_length += spc_rsoc_encode_command_descriptor( + &buf[response_length], rctd, descr); + } + put_unaligned_be32(response_length - 3, buf); + } else { + response_length = spc_rsoc_encode_one_command_descriptor( + &buf[response_length], rctd, descr, + cmd->se_dev); + } + + memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length)); +out: + kfree(buf); + transport_kunmap_data_sg(cmd); + + if (!ret) + target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length); + return ret; +} + +sense_reason_t +spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *cdb = cmd->t_task_cdb; + + switch (cdb[0]) { + case RESERVE: + case RESERVE_10: + case RELEASE: + case RELEASE_10: + if (!dev->dev_attrib.emulate_pr) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) + return TCM_UNSUPPORTED_SCSI_OPCODE; + break; + case PERSISTENT_RESERVE_IN: + case PERSISTENT_RESERVE_OUT: + if (!dev->dev_attrib.emulate_pr) + return TCM_UNSUPPORTED_SCSI_OPCODE; + break; + } + + switch (cdb[0]) { + case MODE_SELECT: + *size = cdb[4]; + cmd->execute_cmd = spc_emulate_modeselect; + break; + case MODE_SELECT_10: + *size = get_unaligned_be16(&cdb[7]); + cmd->execute_cmd = 
spc_emulate_modeselect; + break; + case MODE_SENSE: + *size = cdb[4]; + cmd->execute_cmd = spc_emulate_modesense; + break; + case MODE_SENSE_10: + *size = get_unaligned_be16(&cdb[7]); + cmd->execute_cmd = spc_emulate_modesense; + break; + case LOG_SELECT: + case LOG_SENSE: + *size = get_unaligned_be16(&cdb[7]); + break; + case PERSISTENT_RESERVE_IN: + *size = get_unaligned_be16(&cdb[7]); + cmd->execute_cmd = target_scsi3_emulate_pr_in; + break; + case PERSISTENT_RESERVE_OUT: + *size = get_unaligned_be32(&cdb[5]); + cmd->execute_cmd = target_scsi3_emulate_pr_out; + break; + case RELEASE: + case RELEASE_10: + if (cdb[0] == RELEASE_10) + *size = get_unaligned_be16(&cdb[7]); + else + *size = cmd->data_length; + + cmd->execute_cmd = target_scsi2_reservation_release; + break; + case RESERVE: + case RESERVE_10: + /* + * The SPC-2 RESERVE does not contain a size in the SCSI CDB. + * Assume the passthrough or $FABRIC_MOD will tell us about it. + */ + if (cdb[0] == RESERVE_10) + *size = get_unaligned_be16(&cdb[7]); + else + *size = cmd->data_length; + + cmd->execute_cmd = target_scsi2_reservation_reserve; + break; + case REQUEST_SENSE: + *size = cdb[4]; + cmd->execute_cmd = spc_emulate_request_sense; + break; + case INQUIRY: + *size = get_unaligned_be16(&cdb[3]); + + /* + * Do implicit HEAD_OF_QUEUE processing for INQUIRY. + * See spc4r17 section 5.3 + */ + cmd->sam_task_attr = TCM_HEAD_TAG; + cmd->execute_cmd = spc_emulate_inquiry; + break; + case SECURITY_PROTOCOL_IN: + case SECURITY_PROTOCOL_OUT: + *size = get_unaligned_be32(&cdb[6]); + break; + case EXTENDED_COPY: + *size = get_unaligned_be32(&cdb[10]); + cmd->execute_cmd = target_do_xcopy; + break; + case RECEIVE_COPY_RESULTS: + *size = get_unaligned_be32(&cdb[10]); + cmd->execute_cmd = target_do_receive_copy_results; + break; + case READ_ATTRIBUTE: + case WRITE_ATTRIBUTE: + *size = get_unaligned_be32(&cdb[10]); + break; + case RECEIVE_DIAGNOSTIC: + case SEND_DIAGNOSTIC: + *size = get_unaligned_be16(&cdb[3]); + break; + case WRITE_BUFFER: + *size = get_unaligned_be24(&cdb[6]); + break; + case REPORT_LUNS: + cmd->execute_cmd = spc_emulate_report_luns; + *size = get_unaligned_be32(&cdb[6]); + /* + * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS + * See spc4r17 section 5.3 + */ + cmd->sam_task_attr = TCM_HEAD_TAG; + break; + case TEST_UNIT_READY: + cmd->execute_cmd = spc_emulate_testunitready; + *size = 0; + break; + case MAINTENANCE_IN: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { + /* + * MAINTENANCE_IN from SCC-2 + * Check for emulated MI_REPORT_TARGET_PGS + */ + if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) { + cmd->execute_cmd = + target_emulate_report_target_port_groups; + } + if ((cdb[1] & 0x1f) == + MI_REPORT_SUPPORTED_OPERATION_CODES) + cmd->execute_cmd = + spc_emulate_report_supp_op_codes; + *size = get_unaligned_be32(&cdb[6]); + } else { + /* + * GPCMD_SEND_KEY from multi media commands + */ + *size = get_unaligned_be16(&cdb[8]); + } + break; + case MAINTENANCE_OUT: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { + /* + * MAINTENANCE_OUT from SCC-2 + * Check for emulated MO_SET_TARGET_PGS. 
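+ * Other MAINTENANCE_OUT service actions are not matched here; + * only the allocation length is decoded for them below.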
+ */ + if (cdb[1] == MO_SET_TARGET_PGS) { + cmd->execute_cmd = + target_emulate_set_target_port_groups; + } + *size = get_unaligned_be32(&cdb[6]); + } else { + /* + * GPCMD_SEND_KEY from multi media commands + */ + *size = get_unaligned_be16(&cdb[8]); + } + break; + default: + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + return 0; +} +EXPORT_SYMBOL(spc_parse_cdb); diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c new file mode 100644 index 0000000000..c42cbde8a3 --- /dev/null +++ b/drivers/target/target_core_stat.c @@ -0,0 +1,1365 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_stat.c + * + * Modern ConfigFS group context specific statistics based on original + * target_core_mib.c code + * + * (c) Copyright 2006-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ******************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/string.h> +#include <linux/utsname.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/configfs.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" + +#ifndef INITIAL_JIFFIES +#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) +#endif + +#define SCSI_LU_INDEX 1 +#define LU_COUNT 1 + +/* + * SCSI Device Table + */ + +static struct se_device *to_stat_dev(struct config_item *item) +{ + struct se_dev_stat_grps *sgrps = container_of(to_config_group(item), + struct se_dev_stat_grps, scsi_dev_group); + return container_of(sgrps, struct se_device, dev_stat_grps); +} + +static ssize_t target_stat_inst_show(struct config_item *item, char *page) +{ + struct se_hba *hba = to_stat_dev(item)->se_hba; + + return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); +} + +static ssize_t target_stat_indx_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index); +} + +static ssize_t target_stat_role_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "Target\n"); +} + +static ssize_t target_stat_ports_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count); +} + +CONFIGFS_ATTR_RO(target_stat_, inst); +CONFIGFS_ATTR_RO(target_stat_, indx); +CONFIGFS_ATTR_RO(target_stat_, role); +CONFIGFS_ATTR_RO(target_stat_, ports); + +static struct configfs_attribute *target_stat_scsi_dev_attrs[] = { + &target_stat_attr_inst, + &target_stat_attr_indx, + &target_stat_attr_role, + &target_stat_attr_ports, + NULL, +}; + +static const struct config_item_type target_stat_scsi_dev_cit = { + .ct_attrs = target_stat_scsi_dev_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * SCSI Target Device Table + */ +static struct se_device *to_stat_tgt_dev(struct config_item *item) +{ + struct se_dev_stat_grps *sgrps = container_of(to_config_group(item), + struct se_dev_stat_grps, scsi_tgt_dev_group); + return container_of(sgrps, struct se_device, dev_stat_grps); +} + +static ssize_t target_stat_tgt_inst_show(struct config_item *item, char *page) +{ + struct se_hba *hba = to_stat_tgt_dev(item)->se_hba; + + return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); +} + +static ssize_t target_stat_tgt_indx_show(struct 
config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", to_stat_tgt_dev(item)->dev_index); +} + +static ssize_t target_stat_tgt_num_lus_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); +} + +static ssize_t target_stat_tgt_status_show(struct config_item *item, + char *page) +{ + if (to_stat_tgt_dev(item)->export_count) + return snprintf(page, PAGE_SIZE, "activated"); + else + return snprintf(page, PAGE_SIZE, "deactivated"); +} + +static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item, + char *page) +{ + int non_accessible_lus; + + if (to_stat_tgt_dev(item)->export_count) + non_accessible_lus = 0; + else + non_accessible_lus = 1; + + return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); +} + +static ssize_t target_stat_tgt_resets_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->num_resets)); +} + +static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete)); +} + +static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task)); +} + +CONFIGFS_ATTR_RO(target_stat_tgt_, inst); +CONFIGFS_ATTR_RO(target_stat_tgt_, indx); +CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus); +CONFIGFS_ATTR_RO(target_stat_tgt_, status); +CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus); +CONFIGFS_ATTR_RO(target_stat_tgt_, resets); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete); +CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task); + +static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { + &target_stat_tgt_attr_inst, + &target_stat_tgt_attr_indx, + &target_stat_tgt_attr_num_lus, + &target_stat_tgt_attr_status, + &target_stat_tgt_attr_non_access_lus, + &target_stat_tgt_attr_resets, + &target_stat_tgt_attr_aborts_complete, + &target_stat_tgt_attr_aborts_no_task, + NULL, +}; + +static const struct config_item_type target_stat_scsi_tgt_dev_cit = { + .ct_attrs = target_stat_scsi_tgt_dev_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * SCSI Logical Unit Table + */ + +static struct se_device *to_stat_lu_dev(struct config_item *item) +{ + struct se_dev_stat_grps *sgrps = container_of(to_config_group(item), + struct se_dev_stat_grps, scsi_lu_group); + return container_of(sgrps, struct se_device, dev_stat_grps); +} + +static ssize_t target_stat_lu_inst_show(struct config_item *item, char *page) +{ + struct se_hba *hba = to_stat_lu_dev(item)->se_hba; + + return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); +} + +static ssize_t target_stat_lu_dev_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", + to_stat_lu_dev(item)->dev_index); +} + +static ssize_t target_stat_lu_indx_show(struct config_item *item, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); +} + +static ssize_t target_stat_lu_lun_show(struct config_item *item, char *page) +{ + /* FIXME: scsiLuDefaultLun */ + return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); +} + +static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuWwnName */ + return snprintf(page, PAGE_SIZE, "%s\n", + (strlen(dev->t10_wwn.unit_serial)) ? 
+ dev->t10_wwn.unit_serial : "None"); +} + +static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN) + "s\n", dev->t10_wwn.vendor); +} + +static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN) + "s\n", dev->t10_wwn.model); +} + +static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN) + "s\n", dev->t10_wwn.revision); +} + +static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuPeripheralType */ + return snprintf(page, PAGE_SIZE, "%u\n", + dev->transport->get_device_type(dev)); +} + +static ssize_t target_stat_lu_status_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuStatus */ + return snprintf(page, PAGE_SIZE, "%s\n", + (dev->export_count) ? "available" : "notavailable"); +} + +static ssize_t target_stat_lu_state_bit_show(struct config_item *item, + char *page) +{ + /* scsiLuState */ + return snprintf(page, PAGE_SIZE, "exposed\n"); +} + +static ssize_t target_stat_lu_num_cmds_show(struct config_item *item, + char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuNumCommands */ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_cmds)); +} + +static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item, + char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuReadMegaBytes */ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->read_bytes) >> 20); +} + +static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item, + char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuWrittenMegaBytes */ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->write_bytes) >> 20); +} + +static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuInResets */ + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_resets)); +} + +static ssize_t target_stat_lu_full_stat_show(struct config_item *item, + char *page) +{ + /* FIXME: scsiLuOutTaskSetFullStatus */ + return snprintf(page, PAGE_SIZE, "%u\n", 0); +} + +static ssize_t target_stat_lu_hs_num_cmds_show(struct config_item *item, + char *page) +{ + /* FIXME: scsiLuHSInCommands */ + return snprintf(page, PAGE_SIZE, "%u\n", 0); +} + +static ssize_t target_stat_lu_creation_time_show(struct config_item *item, + char *page) +{ + struct se_device *dev = to_stat_lu_dev(item); + + /* scsiLuCreationTime */ + return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - + INITIAL_JIFFIES) * 100 / HZ)); +} + +CONFIGFS_ATTR_RO(target_stat_lu_, inst); +CONFIGFS_ATTR_RO(target_stat_lu_, dev); +CONFIGFS_ATTR_RO(target_stat_lu_, indx); +CONFIGFS_ATTR_RO(target_stat_lu_, lun); +CONFIGFS_ATTR_RO(target_stat_lu_, lu_name); +CONFIGFS_ATTR_RO(target_stat_lu_, vend); +CONFIGFS_ATTR_RO(target_stat_lu_, prod); +CONFIGFS_ATTR_RO(target_stat_lu_, rev); +CONFIGFS_ATTR_RO(target_stat_lu_, dev_type); +CONFIGFS_ATTR_RO(target_stat_lu_, status); 
+CONFIGFS_ATTR_RO(target_stat_lu_, state_bit); +CONFIGFS_ATTR_RO(target_stat_lu_, num_cmds); +CONFIGFS_ATTR_RO(target_stat_lu_, read_mbytes); +CONFIGFS_ATTR_RO(target_stat_lu_, write_mbytes); +CONFIGFS_ATTR_RO(target_stat_lu_, resets); +CONFIGFS_ATTR_RO(target_stat_lu_, full_stat); +CONFIGFS_ATTR_RO(target_stat_lu_, hs_num_cmds); +CONFIGFS_ATTR_RO(target_stat_lu_, creation_time); + +static struct configfs_attribute *target_stat_scsi_lu_attrs[] = { + &target_stat_lu_attr_inst, + &target_stat_lu_attr_dev, + &target_stat_lu_attr_indx, + &target_stat_lu_attr_lun, + &target_stat_lu_attr_lu_name, + &target_stat_lu_attr_vend, + &target_stat_lu_attr_prod, + &target_stat_lu_attr_rev, + &target_stat_lu_attr_dev_type, + &target_stat_lu_attr_status, + &target_stat_lu_attr_state_bit, + &target_stat_lu_attr_num_cmds, + &target_stat_lu_attr_read_mbytes, + &target_stat_lu_attr_write_mbytes, + &target_stat_lu_attr_resets, + &target_stat_lu_attr_full_stat, + &target_stat_lu_attr_hs_num_cmds, + &target_stat_lu_attr_creation_time, + NULL, +}; + +static const struct config_item_type target_stat_scsi_lu_cit = { + .ct_attrs = target_stat_scsi_lu_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Called from target_core_configfs.c:target_core_make_subdev() to setup + * the target statistics groups + configfs CITs located in target_core_stat.c + */ +void target_stat_setup_dev_default_groups(struct se_device *dev) +{ + config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group, + "scsi_dev", &target_stat_scsi_dev_cit); + configfs_add_default_group(&dev->dev_stat_grps.scsi_dev_group, + &dev->dev_stat_grps.stat_group); + + config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group, + "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); + configfs_add_default_group(&dev->dev_stat_grps.scsi_tgt_dev_group, + &dev->dev_stat_grps.stat_group); + + config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group, + "scsi_lu", &target_stat_scsi_lu_cit); + configfs_add_default_group(&dev->dev_stat_grps.scsi_lu_group, + &dev->dev_stat_grps.stat_group); +} + +/* + * SCSI Port Table + */ + +static struct se_lun *to_stat_port(struct config_item *item) +{ + struct se_port_stat_grps *pgrps = container_of(to_config_group(item), + struct se_port_stat_grps, scsi_port_group); + return container_of(pgrps, struct se_lun, port_stat_grps); +} + +static ssize_t target_stat_port_inst_show(struct config_item *item, char *page) +{ + struct se_lun *lun = to_stat_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_port_dev_show(struct config_item *item, char *page) +{ + struct se_lun *lun = to_stat_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_port_indx_show(struct config_item *item, char *page) +{ + struct se_lun *lun = to_stat_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_port_role_show(struct config_item *item, char *page) +{ + struct se_lun *lun = to_stat_port(item); + struct se_device *dev; + 
ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_port_busy_count_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) { + /* FIXME: scsiPortBusyStatuses */ + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); + } + rcu_read_unlock(); + return ret; +} + +CONFIGFS_ATTR_RO(target_stat_port_, inst); +CONFIGFS_ATTR_RO(target_stat_port_, dev); +CONFIGFS_ATTR_RO(target_stat_port_, indx); +CONFIGFS_ATTR_RO(target_stat_port_, role); +CONFIGFS_ATTR_RO(target_stat_port_, busy_count); + +static struct configfs_attribute *target_stat_scsi_port_attrs[] = { + &target_stat_port_attr_inst, + &target_stat_port_attr_dev, + &target_stat_port_attr_indx, + &target_stat_port_attr_role, + &target_stat_port_attr_busy_count, + NULL, +}; + +static const struct config_item_type target_stat_scsi_port_cit = { + .ct_attrs = target_stat_scsi_port_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * SCSI Target Port Table + */ +static struct se_lun *to_stat_tgt_port(struct config_item *item) +{ + struct se_port_stat_grps *pgrps = container_of(to_config_group(item), + struct se_port_stat_grps, scsi_tgt_port_group); + return container_of(pgrps, struct se_lun, port_stat_grps); +} + +static ssize_t target_stat_tgt_port_inst_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_dev_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_indx_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_name_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_portal_group *tpg = lun->lun_tpg; + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", + tpg->se_tpg_tfo->fabric_name, + lun->lun_tpg->tpg_rtpi); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_portal_group *tpg = lun->lun_tpg; + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", + tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", + tpg->se_tpg_tfo->tpg_get_tag(tpg)); + rcu_read_unlock(); + return ret; +} + +static ssize_t 
target_stat_tgt_port_in_cmds_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&lun->lun_stats.cmd_pdus)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", + (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", + (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_stat_tgt_port(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) { + /* FIXME: scsiTgtPortHsInCommands */ + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); + } + rcu_read_unlock(); + return ret; +} + +CONFIGFS_ATTR_RO(target_stat_tgt_port_, inst); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, dev); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, indx); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, name); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, port_index); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, in_cmds); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, write_mbytes); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, read_mbytes); +CONFIGFS_ATTR_RO(target_stat_tgt_port_, hs_in_cmds); + +static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = { + &target_stat_tgt_port_attr_inst, + &target_stat_tgt_port_attr_dev, + &target_stat_tgt_port_attr_indx, + &target_stat_tgt_port_attr_name, + &target_stat_tgt_port_attr_port_index, + &target_stat_tgt_port_attr_in_cmds, + &target_stat_tgt_port_attr_write_mbytes, + &target_stat_tgt_port_attr_read_mbytes, + &target_stat_tgt_port_attr_hs_in_cmds, + NULL, +}; + +static const struct config_item_type target_stat_scsi_tgt_port_cit = { + .ct_attrs = target_stat_scsi_tgt_port_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * SCSI Transport Table + */ +static struct se_lun *to_transport_stat(struct config_item *item) +{ + struct se_port_stat_grps *pgrps = container_of(to_config_group(item), + struct se_port_stat_grps, scsi_transport_group); + return container_of(pgrps, struct se_lun, port_stat_grps); +} + +static ssize_t target_stat_transport_inst_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_transport_device_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + ssize_t ret = -ENODEV; + + 
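+ /* + * The backing se_device can be unlinked from the LUN at any time, so + * each of these show routines samples lun_se_dev under rcu_read_lock() + * and returns -ENODEV once the device is gone. + */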
rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) { + /* scsiTransportType */ + ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", + tpg->se_tpg_tfo->fabric_name); + } + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_transport_indx_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_transport_dev_name_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + struct t10_wwn *wwn; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) { + wwn = &dev->t10_wwn; + /* scsiTransportDevName */ + ret = snprintf(page, PAGE_SIZE, "%s+%s\n", + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + (strlen(wwn->unit_serial)) ? wwn->unit_serial : + wwn->vendor); + } + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_transport_proto_id_show(struct config_item *item, + char *page) +{ + struct se_lun *lun = to_transport_stat(item); + struct se_device *dev; + struct se_portal_group *tpg = lun->lun_tpg; + ssize_t ret = -ENODEV; + + rcu_read_lock(); + dev = rcu_dereference(lun->lun_se_dev); + if (dev) + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id); + rcu_read_unlock(); + return ret; +} + +CONFIGFS_ATTR_RO(target_stat_transport_, inst); +CONFIGFS_ATTR_RO(target_stat_transport_, device); +CONFIGFS_ATTR_RO(target_stat_transport_, indx); +CONFIGFS_ATTR_RO(target_stat_transport_, dev_name); +CONFIGFS_ATTR_RO(target_stat_transport_, proto_id); + +static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { + &target_stat_transport_attr_inst, + &target_stat_transport_attr_device, + &target_stat_transport_attr_indx, + &target_stat_transport_attr_dev_name, + &target_stat_transport_attr_proto_id, + NULL, +}; + +static const struct config_item_type target_stat_scsi_transport_cit = { + .ct_attrs = target_stat_scsi_transport_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup + * the target port statistics groups + configfs CITs located in target_core_stat.c + */ +void target_stat_setup_port_default_groups(struct se_lun *lun) +{ + config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, + "scsi_port", &target_stat_scsi_port_cit); + configfs_add_default_group(&lun->port_stat_grps.scsi_port_group, + &lun->port_stat_grps.stat_group); + + config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, + "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); + configfs_add_default_group(&lun->port_stat_grps.scsi_tgt_port_group, + &lun->port_stat_grps.stat_group); + + config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, + "scsi_transport", &target_stat_scsi_transport_cit); + configfs_add_default_group(&lun->port_stat_grps.scsi_transport_group, + &lun->port_stat_grps.stat_group); +} + +/* + * SCSI Authorized Initiator Table + */ + +static struct se_lun_acl *auth_to_lacl(struct config_item *item) +{ + struct se_ml_stat_grps *lgrps = container_of(to_config_group(item), + struct se_ml_stat_grps, scsi_auth_intr_group); + return container_of(lgrps, struct 
se_lun_acl, ml_stat_grps); +} + +static ssize_t target_stat_auth_inst_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + struct se_portal_group *tpg; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + tpg = nacl->se_tpg; + /* scsiInstIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_dev_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + + /* scsiDeviceIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_port_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + struct se_portal_group *tpg; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + tpg = nacl->se_tpg; + /* scsiAuthIntrTgtPortIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_indx_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_dev_or_port_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrDevOrPort */ + ret = snprintf(page, PAGE_SIZE, "%u\n", 1); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_intr_name_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrName */ + ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_map_indx_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* FIXME: scsiAuthIntrLunMapIndex */ + ret = snprintf(page, 
PAGE_SIZE, "%u\n", 0); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_att_count_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrAttachedTimes */ + ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_num_cmds_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrOutCommands */ + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&deve->total_cmds)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrReadMegaBytes */ + ret = snprintf(page, PAGE_SIZE, "%u\n", + (u32)(atomic_long_read(&deve->read_bytes) >> 20)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrWrittenMegaBytes */ + ret = snprintf(page, PAGE_SIZE, "%u\n", + (u32)(atomic_long_read(&deve->write_bytes) >> 20)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* FIXME: scsiAuthIntrHSOutCommands */ + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_creation_time_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAuthIntrLastCreation */ + ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - + INITIAL_JIFFIES) * 100 / HZ)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_auth_row_status_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = auth_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + 
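+ /* The mapped LUN has been detached from this node ACL. */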
rcu_read_unlock(); + return -ENODEV; + } + /* FIXME: scsiAuthIntrRowStatus */ + ret = snprintf(page, PAGE_SIZE, "Ready\n"); + rcu_read_unlock(); + return ret; +} + +CONFIGFS_ATTR_RO(target_stat_auth_, inst); +CONFIGFS_ATTR_RO(target_stat_auth_, dev); +CONFIGFS_ATTR_RO(target_stat_auth_, port); +CONFIGFS_ATTR_RO(target_stat_auth_, indx); +CONFIGFS_ATTR_RO(target_stat_auth_, dev_or_port); +CONFIGFS_ATTR_RO(target_stat_auth_, intr_name); +CONFIGFS_ATTR_RO(target_stat_auth_, map_indx); +CONFIGFS_ATTR_RO(target_stat_auth_, att_count); +CONFIGFS_ATTR_RO(target_stat_auth_, num_cmds); +CONFIGFS_ATTR_RO(target_stat_auth_, read_mbytes); +CONFIGFS_ATTR_RO(target_stat_auth_, write_mbytes); +CONFIGFS_ATTR_RO(target_stat_auth_, hs_num_cmds); +CONFIGFS_ATTR_RO(target_stat_auth_, creation_time); +CONFIGFS_ATTR_RO(target_stat_auth_, row_status); + +static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = { + &target_stat_auth_attr_inst, + &target_stat_auth_attr_dev, + &target_stat_auth_attr_port, + &target_stat_auth_attr_indx, + &target_stat_auth_attr_dev_or_port, + &target_stat_auth_attr_intr_name, + &target_stat_auth_attr_map_indx, + &target_stat_auth_attr_att_count, + &target_stat_auth_attr_num_cmds, + &target_stat_auth_attr_read_mbytes, + &target_stat_auth_attr_write_mbytes, + &target_stat_auth_attr_hs_num_cmds, + &target_stat_auth_attr_creation_time, + &target_stat_auth_attr_row_status, + NULL, +}; + +static const struct config_item_type target_stat_scsi_auth_intr_cit = { + .ct_attrs = target_stat_scsi_auth_intr_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * SCSI Attached Initiator Port Table + */ + +static struct se_lun_acl *iport_to_lacl(struct config_item *item) +{ + struct se_ml_stat_grps *lgrps = container_of(to_config_group(item), + struct se_ml_stat_grps, scsi_att_intr_port_group); + return container_of(lgrps, struct se_lun_acl, ml_stat_grps); +} + +static ssize_t target_stat_iport_inst_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = iport_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + struct se_portal_group *tpg; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + tpg = nacl->se_tpg; + /* scsiInstIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_iport_dev_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = iport_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + + /* scsiDeviceIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_iport_port_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = iport_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + struct se_portal_group *tpg; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + tpg = nacl->se_tpg; + /* scsiPortIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); + rcu_read_unlock(); + return ret; +} + +static ssize_t 
target_stat_iport_indx_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = iport_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_session *se_sess; + struct se_portal_group *tpg; + ssize_t ret; + + spin_lock_irq(&nacl->nacl_sess_lock); + se_sess = nacl->nacl_sess; + if (!se_sess) { + spin_unlock_irq(&nacl->nacl_sess_lock); + return -ENODEV; + } + + tpg = nacl->se_tpg; + /* scsiAttIntrPortIndex */ + ret = snprintf(page, PAGE_SIZE, "%u\n", + tpg->se_tpg_tfo->sess_get_index(se_sess)); + spin_unlock_irq(&nacl->nacl_sess_lock); + return ret; +} + +static ssize_t target_stat_iport_port_auth_indx_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = iport_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry *deve; + ssize_t ret; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, lacl->mapped_lun); + if (!deve) { + rcu_read_unlock(); + return -ENODEV; + } + /* scsiAttIntrPortAuthIntrIdx */ + ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); + rcu_read_unlock(); + return ret; +} + +static ssize_t target_stat_iport_port_ident_show(struct config_item *item, + char *page) +{ + struct se_lun_acl *lacl = iport_to_lacl(item); + struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_session *se_sess; + struct se_portal_group *tpg; + ssize_t ret; + unsigned char buf[64]; + + spin_lock_irq(&nacl->nacl_sess_lock); + se_sess = nacl->nacl_sess; + if (!se_sess) { + spin_unlock_irq(&nacl->nacl_sess_lock); + return -ENODEV; + } + + tpg = nacl->se_tpg; + /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ + memset(buf, 0, 64); + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) + tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64); + + ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); + spin_unlock_irq(&nacl->nacl_sess_lock); + return ret; +} + +CONFIGFS_ATTR_RO(target_stat_iport_, inst); +CONFIGFS_ATTR_RO(target_stat_iport_, dev); +CONFIGFS_ATTR_RO(target_stat_iport_, port); +CONFIGFS_ATTR_RO(target_stat_iport_, indx); +CONFIGFS_ATTR_RO(target_stat_iport_, port_auth_indx); +CONFIGFS_ATTR_RO(target_stat_iport_, port_ident); + +static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = { + &target_stat_iport_attr_inst, + &target_stat_iport_attr_dev, + &target_stat_iport_attr_port, + &target_stat_iport_attr_indx, + &target_stat_iport_attr_port_auth_indx, + &target_stat_iport_attr_port_ident, + NULL, +}; + +static const struct config_item_type target_stat_scsi_att_intr_port_cit = { + .ct_attrs = target_stat_scsi_ath_intr_port_attrs, + .ct_owner = THIS_MODULE, +}; + +/* + * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to setup + * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c + */ +void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) +{ + config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, + "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); + configfs_add_default_group(&lacl->ml_stat_grps.scsi_auth_intr_group, + &lacl->ml_stat_grps.stat_group); + + config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, + "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); + configfs_add_default_group(&lacl->ml_stat_grps.scsi_att_intr_port_group, + &lacl->ml_stat_grps.stat_group); +} diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c new file mode 100644 index 0000000000..4718db6282 --- /dev/null +++ 
b/drivers/target/target_core_tmr.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_tmr.c + * + * This file contains SPC-3 task management infrastructure + * + * (c) Copyright 2009-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/export.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" + +int core_tmr_alloc_req( + struct se_cmd *se_cmd, + void *fabric_tmr_ptr, + u8 function, + gfp_t gfp_flags) +{ + struct se_tmr_req *tmr; + + tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags); + if (!tmr) { + pr_err("Unable to allocate struct se_tmr_req\n"); + return -ENOMEM; + } + + se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB; + se_cmd->se_tmr_req = tmr; + tmr->task_cmd = se_cmd; + tmr->fabric_tmr_ptr = fabric_tmr_ptr; + tmr->function = function; + INIT_LIST_HEAD(&tmr->tmr_list); + + return 0; +} +EXPORT_SYMBOL(core_tmr_alloc_req); + +void core_tmr_release_req(struct se_tmr_req *tmr) +{ + kfree(tmr); +} + +static int target_check_cdb_and_preempt(struct list_head *list, + struct se_cmd *cmd) +{ + struct t10_pr_registration *reg; + + if (!list) + return 0; + list_for_each_entry(reg, list, pr_reg_abort_list) { + if (reg->pr_res_key == cmd->pr_res_key) + return 0; + } + + return 1; +} + +static bool __target_check_io_state(struct se_cmd *se_cmd, + struct se_session *tmr_sess, bool tas) +{ + struct se_session *sess = se_cmd->se_sess; + + lockdep_assert_held(&sess->sess_cmd_lock); + + /* + * If command already reached CMD_T_COMPLETE state within + * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, + * this se_cmd has been passed to fabric driver and will + * not be aborted. + * + * Otherwise, obtain a local se_cmd->cmd_kref now for TMR + * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as + * long as se_cmd->cmd_kref is still active unless zero. 
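+	 *
+	 * A condensed sketch of the calling pattern the TMR paths below
+	 * follow (illustrative only, not additional code):
+	 *
+	 *	spin_lock(&sess->sess_cmd_lock);
+	 *	rc = __target_check_io_state(cmd, tmr_sess, tas);
+	 *	spin_unlock(&sess->sess_cmd_lock);
+	 *	if (!rc)
+	 *		continue;	(not abortable, no reference taken)
+	 *	...
+	 *	target_put_cmd_and_wait(cmd);
+	 *
+	 * i.e. on success the caller inherits a cmd_kref reference that it
+	 * must drop once the abort has been processed.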
+ */ + spin_lock(&se_cmd->t_state_lock); + if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { + pr_debug("Attempted to abort io tag: %llu already complete or" + " fabric stop, skipping\n", se_cmd->tag); + spin_unlock(&se_cmd->t_state_lock); + return false; + } + se_cmd->transport_state |= CMD_T_ABORTED; + + if ((tmr_sess != se_cmd->se_sess) && tas) + se_cmd->transport_state |= CMD_T_TAS; + + spin_unlock(&se_cmd->t_state_lock); + + return kref_get_unless_zero(&se_cmd->cmd_kref); +} + +void core_tmr_abort_task( + struct se_device *dev, + struct se_tmr_req *tmr, + struct se_session *se_sess) +{ + LIST_HEAD(aborted_list); + struct se_cmd *se_cmd, *next; + unsigned long flags; + bool rc; + u64 ref_tag; + int i; + + for (i = 0; i < dev->queue_cnt; i++) { + flush_work(&dev->queues[i].sq.work); + + spin_lock_irqsave(&dev->queues[i].lock, flags); + list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list, + state_list) { + if (se_sess != se_cmd->se_sess) + continue; + + /* + * skip task management functions, including + * tmr->task_cmd + */ + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + continue; + + ref_tag = se_cmd->tag; + if (tmr->ref_task_tag != ref_tag) + continue; + + pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n", + se_cmd->se_tfo->fabric_name, ref_tag); + + spin_lock(&se_sess->sess_cmd_lock); + rc = __target_check_io_state(se_cmd, se_sess, 0); + spin_unlock(&se_sess->sess_cmd_lock); + if (!rc) + continue; + + list_move_tail(&se_cmd->state_list, &aborted_list); + se_cmd->state_active = false; + spin_unlock_irqrestore(&dev->queues[i].lock, flags); + + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, TMR_ABORT_TASK, + &aborted_list); + + list_del_init(&se_cmd->state_list); + target_put_cmd_and_wait(se_cmd); + + pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n", + ref_tag); + tmr->response = TMR_FUNCTION_COMPLETE; + atomic_long_inc(&dev->aborts_complete); + return; + } + spin_unlock_irqrestore(&dev->queues[i].lock, flags); + } + + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list); + + printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", + tmr->ref_task_tag); + tmr->response = TMR_TASK_DOES_NOT_EXIST; + atomic_long_inc(&dev->aborts_no_task); +} + +static void core_tmr_drain_tmr_list( + struct se_device *dev, + struct se_tmr_req *tmr, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_tmr_list); + struct se_session *sess; + struct se_tmr_req *tmr_p, *tmr_pp; + struct se_cmd *cmd; + unsigned long flags; + bool rc; + /* + * Release all pending and outgoing TMRs aside from the received + * LUN_RESET tmr.. + */ + spin_lock_irqsave(&dev->se_tmr_lock, flags); + list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { + if (tmr_p == tmr) + continue; + + cmd = tmr_p->task_cmd; + if (!cmd) { + pr_err("Unable to locate struct se_cmd for TMR\n"); + continue; + } + + /* + * We only execute one LUN_RESET at a time so we can't wait + * on them below. + */ + if (tmr_p->function == TMR_LUN_RESET) + continue; + + /* + * If this function was called with a valid pr_res_key + * parameter (eg: for PROUT PREEMPT_AND_ABORT service action + * skip non registration key matching TMRs. 
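+		 *
+		 * For reference, target_check_cdb_and_preempt() above returns
+		 * 0 (process the TMR) when no list was passed or when
+		 * cmd->pr_res_key matches a registration queued for
+		 * preemption, and 1 (skip it) otherwise.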
+ */ + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) + continue; + + sess = cmd->se_sess; + if (WARN_ON_ONCE(!sess)) + continue; + + spin_lock(&sess->sess_cmd_lock); + rc = __target_check_io_state(cmd, sess, 0); + spin_unlock(&sess->sess_cmd_lock); + + if (!rc) { + printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); + continue; + } + + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + tmr_p->tmr_dev = NULL; + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + + list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) { + list_del_init(&tmr_p->tmr_list); + cmd = tmr_p->task_cmd; + + pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," + " Response: 0x%02x, t_state: %d\n", + (preempt_and_abort_list) ? "Preempt" : "", tmr_p, + tmr_p->function, tmr_p->response, cmd->t_state); + + target_put_cmd_and_wait(cmd); + } +} + +/** + * core_tmr_drain_state_list() - abort SCSI commands associated with a device + * + * @dev: Device for which to abort outstanding SCSI commands. + * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called + * to realize the PREEMPT AND ABORT functionality. + * @tmr_sess: Session through which the LUN RESET has been received. + * @tas: Task Aborted Status (TAS) bit from the SCSI control mode page. + * A quote from SPC-4, paragraph "7.5.10 Control mode page": + * "A task aborted status (TAS) bit set to zero specifies that + * aborted commands shall be terminated by the device server + * without any response to the application client. A TAS bit set + * to one specifies that commands aborted by the actions of an I_T + * nexus other than the I_T nexus on which the command was + * received shall be completed with TASK ABORTED status." + * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list + * with registrations that will be preempted. + */ +static void core_tmr_drain_state_list( + struct se_device *dev, + struct se_cmd *prout_cmd, + struct se_session *tmr_sess, + bool tas, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_task_list); + struct se_session *sess; + struct se_cmd *cmd, *next; + unsigned long flags; + int rc, i; + + /* + * Complete outstanding commands with TASK_ABORTED SAM status. + * + * This is following sam4r17, section 5.6 Aborting commands, Table 38 + * for TMR LUN_RESET: + * + * a) "Yes" indicates that each command that is aborted on an I_T nexus + * other than the one that caused the SCSI device condition is + * completed with TASK ABORTED status, if the TAS bit is set to one in + * the Control mode page (see SPC-4). "No" indicates that no status is + * returned for aborted commands. + * + * d) If the logical unit reset is caused by a particular I_T nexus + * (e.g., by a LOGICAL UNIT RESET task management function), then "yes" + * (TASK_ABORTED status) applies. + * + * Otherwise (e.g., if triggered by a hard reset), "no" + * (no TASK_ABORTED SAM status) applies. + * + * Note that this seems to be independent of TAS (Task Aborted Status) + * in the Control Mode Page. + */ + for (i = 0; i < dev->queue_cnt; i++) { + flush_work(&dev->queues[i].sq.work); + + spin_lock_irqsave(&dev->queues[i].lock, flags); + list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list, + state_list) { + /* + * For PREEMPT_AND_ABORT usage, only process commands + * with a matching reservation key. + */ + if (target_check_cdb_and_preempt(preempt_and_abort_list, + cmd)) + continue; + + /* + * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 
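+			 * (the command carrying the PREEMPT_AND_ABORT service
+			 * action must survive in order to complete the
+			 * preemption and return its own status).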
+ */ + if (prout_cmd == cmd) + continue; + + sess = cmd->se_sess; + if (WARN_ON_ONCE(!sess)) + continue; + + spin_lock(&sess->sess_cmd_lock); + rc = __target_check_io_state(cmd, tmr_sess, tas); + spin_unlock(&sess->sess_cmd_lock); + if (!rc) + continue; + + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + } + spin_unlock_irqrestore(&dev->queues[i].lock, flags); + } + + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, preempt_and_abort_list ? + TMR_LUN_RESET_PRO : TMR_LUN_RESET, + &drain_task_list); + + while (!list_empty(&drain_task_list)) { + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); + list_del_init(&cmd->state_list); + + target_show_cmd("LUN_RESET: ", cmd); + pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n", + cmd->tag, (preempt_and_abort_list) ? "preempt" : "", + cmd->pr_res_key); + + target_put_cmd_and_wait(cmd); + } +} + +int core_tmr_lun_reset( + struct se_device *dev, + struct se_tmr_req *tmr, + struct list_head *preempt_and_abort_list, + struct se_cmd *prout_cmd) +{ + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; + struct se_session *tmr_sess = NULL; + bool tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS + * struct se_device attributes. spc4r17 section 7.4.6 Control mode page + * + * A task aborted status (TAS) bit set to zero specifies that aborted + * tasks shall be terminated by the device server without any response + * to the application client. A TAS bit set to one specifies that tasks + * aborted by the actions of an I_T nexus other than the I_T nexus on + * which the command was received shall be completed with TASK ABORTED + * status (see SAM-4). + */ + tas = dev->dev_attrib.emulate_tas; + /* + * Determine if this se_tmr is coming from a $FABRIC_MOD + * or struct se_device passthrough.. + */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { + tmr_sess = tmr->task_cmd->se_sess; + tmr_nacl = tmr_sess->se_node_acl; + tmr_tpg = tmr_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", + tmr_tpg->se_tpg_tfo->fabric_name, + tmr_nacl->initiatorname); + } + } + + + /* + * We only allow one reset or preempt and abort to execute at a time + * to prevent one call from claiming all the cmds causing a second + * call from returning while cmds it should have waited on are still + * running. + */ + mutex_lock(&dev->lun_reset_mutex); + + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", + (preempt_and_abort_list) ? "Preempt" : "TMR", + dev->transport->name, tas); + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); + core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, + preempt_and_abort_list); + + mutex_unlock(&dev->lun_reset_mutex); + + /* + * Clear any legacy SPC-2 reservation when called during + * LOGICAL UNIT RESET + */ + if (!preempt_and_abort_list && + (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) { + spin_lock(&dev->dev_reservation_lock); + dev->reservation_holder = NULL; + dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS; + spin_unlock(&dev->dev_reservation_lock); + pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); + } + + atomic_long_inc(&dev->num_resets); + + pr_debug("LUN_RESET: %s for [%s] Complete\n", + (preempt_and_abort_list) ? 
"Preempt" : "TMR", + dev->transport->name); + return 0; +} + diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c new file mode 100644 index 0000000000..c0e429e5ef --- /dev/null +++ b/drivers/target/target_core_tpg.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_tpg.c + * + * This file contains generic Target Portal Group related functions. + * + * (c) Copyright 2002-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/net.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/in.h> +#include <linux/export.h> +#include <net/sock.h> +#include <net/tcp.h> +#include <scsi/scsi_proto.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +extern struct se_device *g_lun0_dev; +static DEFINE_XARRAY_ALLOC(tpg_xa); + +/* __core_tpg_get_initiator_node_acl(): + * + * mutex_lock(&tpg->acl_node_mutex); must be held when calling + */ +struct se_node_acl *__core_tpg_get_initiator_node_acl( + struct se_portal_group *tpg, + const char *initiatorname) +{ + struct se_node_acl *acl; + + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (!strcmp(acl->initiatorname, initiatorname)) + return acl; + } + + return NULL; +} + +/* core_tpg_get_initiator_node_acl(): + * + * + */ +struct se_node_acl *core_tpg_get_initiator_node_acl( + struct se_portal_group *tpg, + unsigned char *initiatorname) +{ + struct se_node_acl *acl; + /* + * Obtain se_node_acl->acl_kref using fabric driver provided + * initiatorname[] during node acl endpoint lookup driven by + * new se_session login. + * + * The reference is held until se_session shutdown -> release + * occurs via fabric driver invoked transport_deregister_session() + * or transport_free_session() code. 
+ */ + mutex_lock(&tpg->acl_node_mutex); + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); + if (acl) { + if (!kref_get_unless_zero(&acl->acl_kref)) + acl = NULL; + } + mutex_unlock(&tpg->acl_node_mutex); + + return acl; +} +EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); + +void core_allocate_nexus_loss_ua( + struct se_node_acl *nacl) +{ + struct se_dev_entry *deve; + + if (!nacl) + return; + + rcu_read_lock(); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) + core_scsi3_ua_allocate(deve, 0x29, + ASCQ_29H_NEXUS_LOSS_OCCURRED); + rcu_read_unlock(); +} +EXPORT_SYMBOL(core_allocate_nexus_loss_ua); + +/* core_tpg_add_node_to_devs(): + * + * + */ +void core_tpg_add_node_to_devs( + struct se_node_acl *acl, + struct se_portal_group *tpg, + struct se_lun *lun_orig) +{ + bool lun_access_ro = true; + struct se_lun *lun; + struct se_device *dev; + + mutex_lock(&tpg->tpg_lun_mutex); + hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) { + if (lun_orig && lun != lun_orig) + continue; + + dev = rcu_dereference_check(lun->lun_se_dev, + lockdep_is_held(&tpg->tpg_lun_mutex)); + /* + * By default in LIO-Target $FABRIC_MOD, + * demo_mode_write_protect is ON, or READ_ONLY; + */ + if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { + lun_access_ro = false; + } else { + /* + * Allow only optical drives to issue R/W in default RO + * demo mode. + */ + if (dev->transport->get_device_type(dev) == TYPE_DISK) + lun_access_ro = true; + else + lun_access_ro = false; + } + + pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" + " access for LUN in Demo Mode\n", + tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, + lun_access_ro ? "READ-ONLY" : "READ-WRITE"); + + core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, + lun_access_ro, acl, tpg); + /* + * Check to see if there are any existing persistent reservation + * APTPL pre-registrations that need to be enabled for this dynamic + * LUN ACL now.. 
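+		 * (APTPL is the SPC "Activate Persist Through Power Loss"
+		 * support: any matching saved pre-registration is turned
+		 * into a live registration for this LUN ACL).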
+ */ + core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, + lun->unpacked_lun); + } + mutex_unlock(&tpg->tpg_lun_mutex); +} + +static void +target_set_nacl_queue_depth(struct se_portal_group *tpg, + struct se_node_acl *acl, u32 queue_depth) +{ + acl->queue_depth = queue_depth; + + if (!acl->queue_depth) { + pr_warn("Queue depth for %s Initiator Node: %s is 0," + "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name, + acl->initiatorname); + acl->queue_depth = 1; + } +} + +static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, + const unsigned char *initiatorname) +{ + struct se_node_acl *acl; + u32 queue_depth; + + acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), + GFP_KERNEL); + if (!acl) + return NULL; + + INIT_LIST_HEAD(&acl->acl_list); + INIT_LIST_HEAD(&acl->acl_sess_list); + INIT_HLIST_HEAD(&acl->lun_entry_hlist); + kref_init(&acl->acl_kref); + init_completion(&acl->acl_free_comp); + spin_lock_init(&acl->nacl_sess_lock); + mutex_init(&acl->lun_entry_mutex); + atomic_set(&acl->acl_pr_ref_count, 0); + + if (tpg->se_tpg_tfo->tpg_get_default_depth) + queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); + else + queue_depth = 1; + target_set_nacl_queue_depth(tpg, acl, queue_depth); + + snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); + acl->se_tpg = tpg; + acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); + + tpg->se_tpg_tfo->set_default_node_attributes(acl); + + return acl; +} + +static void target_add_node_acl(struct se_node_acl *acl) +{ + struct se_portal_group *tpg = acl->se_tpg; + + mutex_lock(&tpg->acl_node_mutex); + list_add_tail(&acl->acl_list, &tpg->acl_node_list); + mutex_unlock(&tpg->acl_node_mutex); + + pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", + tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), + acl->dynamic_node_acl ? "DYNAMIC" : "", + acl->queue_depth, + tpg->se_tpg_tfo->fabric_name, + acl->initiatorname); +} + +bool target_tpg_has_node_acl(struct se_portal_group *tpg, + const char *initiatorname) +{ + struct se_node_acl *acl; + bool found = false; + + mutex_lock(&tpg->acl_node_mutex); + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (!strcmp(acl->initiatorname, initiatorname)) { + found = true; + break; + } + } + mutex_unlock(&tpg->acl_node_mutex); + + return found; +} +EXPORT_SYMBOL(target_tpg_has_node_acl); + +struct se_node_acl *core_tpg_check_initiator_node_acl( + struct se_portal_group *tpg, + unsigned char *initiatorname) +{ + struct se_node_acl *acl; + + acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); + if (acl) + return acl; + + if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) + return NULL; + + acl = target_alloc_node_acl(tpg, initiatorname); + if (!acl) + return NULL; + /* + * When allocating a dynamically generated node_acl, go ahead + * and take the extra kref now before returning to the fabric + * driver caller. + * + * Note this reference will be released at session shutdown + * time within transport_free_session() code. + */ + kref_get(&acl->acl_kref); + acl->dynamic_node_acl = 1; + + /* + * Here we only create demo-mode MappedLUNs from the active + * TPG LUNs if the fabric is not explicitly asking for + * tpg_check_demo_mode_login_only() == 1. 
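+	 * In other words, demo-mode MappedLUNs are generated unless the
+	 * fabric implements tpg_check_demo_mode_login_only() and that op
+	 * returns 1, which is exactly the condition tested below.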
+ */ + if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || + (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) + core_tpg_add_node_to_devs(acl, tpg, NULL); + + target_add_node_acl(acl); + return acl; +} +EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); + +void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) +{ + while (atomic_read(&nacl->acl_pr_ref_count) != 0) + cpu_relax(); +} + +struct se_node_acl *core_tpg_add_initiator_node_acl( + struct se_portal_group *tpg, + const char *initiatorname) +{ + struct se_node_acl *acl; + + mutex_lock(&tpg->acl_node_mutex); + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); + if (acl) { + if (acl->dynamic_node_acl) { + acl->dynamic_node_acl = 0; + pr_debug("%s_TPG[%u] - Replacing dynamic ACL" + " for %s\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); + mutex_unlock(&tpg->acl_node_mutex); + return acl; + } + + pr_err("ACL entry for %s Initiator" + " Node %s already exists for TPG %u, ignoring" + " request.\n", tpg->se_tpg_tfo->fabric_name, + initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); + mutex_unlock(&tpg->acl_node_mutex); + return ERR_PTR(-EEXIST); + } + mutex_unlock(&tpg->acl_node_mutex); + + acl = target_alloc_node_acl(tpg, initiatorname); + if (!acl) + return ERR_PTR(-ENOMEM); + + target_add_node_acl(acl); + return acl; +} + +static void target_shutdown_sessions(struct se_node_acl *acl) +{ + struct se_session *sess; + unsigned long flags; + +restart: + spin_lock_irqsave(&acl->nacl_sess_lock, flags); + list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) { + if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped)) + continue; + + list_del_init(&sess->sess_acl_list); + spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); + + if (acl->se_tpg->se_tpg_tfo->close_session) + acl->se_tpg->se_tpg_tfo->close_session(sess); + goto restart; + } + spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); +} + +void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) +{ + struct se_portal_group *tpg = acl->se_tpg; + + mutex_lock(&tpg->acl_node_mutex); + if (acl->dynamic_node_acl) + acl->dynamic_node_acl = 0; + list_del_init(&acl->acl_list); + mutex_unlock(&tpg->acl_node_mutex); + + target_shutdown_sessions(acl); + + target_put_nacl(acl); + /* + * Wait for last target_put_nacl() to complete in target_complete_nacl() + * for active fabric session transport_deregister_session() callbacks. + */ + wait_for_completion(&acl->acl_free_comp); + + core_tpg_wait_for_nacl_pr_ref(acl); + core_free_device_list_for_node(acl, tpg); + + pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->fabric_name, acl->initiatorname); + + kfree(acl); +} + +/* core_tpg_set_initiator_node_queue_depth(): + * + * + */ +int core_tpg_set_initiator_node_queue_depth( + struct se_node_acl *acl, + u32 queue_depth) +{ + struct se_portal_group *tpg = acl->se_tpg; + + /* + * Allow the setting of se_node_acl queue_depth to be idempotent, + * and not force a session shutdown event if the value is not + * changing. + */ + if (acl->queue_depth == queue_depth) + return 0; + /* + * User has requested to change the queue depth for a Initiator Node. + * Change the value in the Node's struct se_node_acl, and call + * target_set_nacl_queue_depth() to set the new queue depth. 
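+	 *
+	 * A hypothetical fabric configfs ->store() handler (illustrative
+	 * only; "page" is the usual configfs buffer) would funnel a
+	 * userspace write into this call roughly as:
+	 *
+	 *	u32 new_depth;
+	 *
+	 *	if (kstrtou32(page, 0, &new_depth))
+	 *		return -EINVAL;
+	 *	ret = core_tpg_set_initiator_node_queue_depth(acl, new_depth);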
+ */ + target_set_nacl_queue_depth(tpg, acl, queue_depth); + + /* + * Shutdown all pending sessions to force session reinstatement. + */ + target_shutdown_sessions(acl); + + pr_debug("Successfully changed queue depth to: %d for Initiator" + " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, + acl->initiatorname, tpg->se_tpg_tfo->fabric_name, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); + + return 0; +} +EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); + +/* core_tpg_set_initiator_node_tag(): + * + * Initiator nodeacl tags are not used internally, but may be used by + * userspace to emulate aliases or groups. + * Returns length of newly-set tag or -EINVAL. + */ +int core_tpg_set_initiator_node_tag( + struct se_portal_group *tpg, + struct se_node_acl *acl, + const char *new_tag) +{ + if (strlen(new_tag) >= MAX_ACL_TAG_SIZE) + return -EINVAL; + + if (!strncmp("NULL", new_tag, 4)) { + acl->acl_tag[0] = '\0'; + return 0; + } + + return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag); +} +EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); + +static void core_tpg_lun_ref_release(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_shutdown_comp); +} + +static int target_tpg_register_rtpi(struct se_portal_group *se_tpg) +{ + u32 val; + int ret; + + if (se_tpg->rtpi_manual) { + ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL); + if (ret) { + pr_info("%s_TPG[%hu] - Can not set RTPI %#x, it is already busy", + se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg), + se_tpg->tpg_rtpi); + return -EINVAL; + } + } else { + ret = xa_alloc(&tpg_xa, &val, se_tpg, + XA_LIMIT(1, USHRT_MAX), GFP_KERNEL); + if (!ret) + se_tpg->tpg_rtpi = val; + } + + return ret; +} + +static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg) +{ + if (se_tpg->tpg_rtpi && se_tpg->enabled) + xa_erase(&tpg_xa, se_tpg->tpg_rtpi); +} + +int target_tpg_enable(struct se_portal_group *se_tpg) +{ + int ret; + + ret = target_tpg_register_rtpi(se_tpg); + if (ret) + return ret; + + ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true); + if (ret) { + target_tpg_deregister_rtpi(se_tpg); + return ret; + } + + se_tpg->enabled = true; + + return 0; +} + +int target_tpg_disable(struct se_portal_group *se_tpg) +{ + int ret; + + target_tpg_deregister_rtpi(se_tpg); + + ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false); + if (!ret) + se_tpg->enabled = false; + + return ret; +} + +/* Does not change se_wwn->priv. */ +int core_tpg_register( + struct se_wwn *se_wwn, + struct se_portal_group *se_tpg, + int proto_id) +{ + int ret; + + if (!se_tpg) + return -EINVAL; + /* + * For the typical case where core_tpg_register() is called by a + * fabric driver from target_core_fabric_ops->fabric_make_tpg() + * configfs context, use the original tf_ops pointer already saved + * by target-core in target_fabric_make_wwn(). + * + * Otherwise, for special cases like iscsi-target discovery TPGs + * the caller is responsible for setting ->se_tpg_tfo ahead of + * calling core_tpg_register(). 
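+	 *
+	 * A typical fabric_make_tpg() caller (a hypothetical fabric is
+	 * shown purely for illustration) therefore needs nothing beyond:
+	 *
+	 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
+	 *	if (ret < 0)
+	 *		return ERR_PTR(ret);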
+ */ + if (se_wwn) + se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops; + + if (!se_tpg->se_tpg_tfo) { + pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n"); + return -EINVAL; + } + + INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist); + se_tpg->proto_id = proto_id; + se_tpg->se_tpg_wwn = se_wwn; + atomic_set(&se_tpg->tpg_pr_ref_count, 0); + INIT_LIST_HEAD(&se_tpg->acl_node_list); + INIT_LIST_HEAD(&se_tpg->tpg_sess_list); + spin_lock_init(&se_tpg->session_lock); + mutex_init(&se_tpg->tpg_lun_mutex); + mutex_init(&se_tpg->acl_node_mutex); + + if (se_tpg->proto_id >= 0) { + se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0); + if (IS_ERR(se_tpg->tpg_virt_lun0)) + return PTR_ERR(se_tpg->tpg_virt_lun0); + + ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, + true, g_lun0_dev); + if (ret < 0) { + kfree(se_tpg->tpg_virt_lun0); + return ret; + } + } + + pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, " + "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ? + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL, + se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); + + return 0; +} +EXPORT_SYMBOL(core_tpg_register); + +int core_tpg_deregister(struct se_portal_group *se_tpg) +{ + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; + struct se_node_acl *nacl, *nacl_tmp; + LIST_HEAD(node_list); + + pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, " + "Proto: %d, Portal Tag: %u\n", tfo->fabric_name, + tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL, + se_tpg->proto_id, tfo->tpg_get_tag(se_tpg)); + + while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) + cpu_relax(); + + mutex_lock(&se_tpg->acl_node_mutex); + list_splice_init(&se_tpg->acl_node_list, &node_list); + mutex_unlock(&se_tpg->acl_node_mutex); + /* + * Release any remaining demo-mode generated se_node_acl that have + * not been released because of TFO->tpg_check_demo_mode_cache() == 1 + * in transport_deregister_session(). 
+ */ + list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { + list_del_init(&nacl->acl_list); + + core_tpg_wait_for_nacl_pr_ref(nacl); + core_free_device_list_for_node(nacl, se_tpg); + kfree(nacl); + } + + if (se_tpg->proto_id >= 0) { + core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); + kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head); + } + + target_tpg_deregister_rtpi(se_tpg); + + return 0; +} +EXPORT_SYMBOL(core_tpg_deregister); + +struct se_lun *core_tpg_alloc_lun( + struct se_portal_group *tpg, + u64 unpacked_lun) +{ + struct se_lun *lun; + + lun = kzalloc(sizeof(*lun), GFP_KERNEL); + if (!lun) { + pr_err("Unable to allocate se_lun memory\n"); + return ERR_PTR(-ENOMEM); + } + lun->unpacked_lun = unpacked_lun; + atomic_set(&lun->lun_acl_count, 0); + init_completion(&lun->lun_shutdown_comp); + INIT_LIST_HEAD(&lun->lun_deve_list); + INIT_LIST_HEAD(&lun->lun_dev_link); + atomic_set(&lun->lun_tg_pt_secondary_offline, 0); + spin_lock_init(&lun->lun_deve_lock); + mutex_init(&lun->lun_tg_pt_md_mutex); + INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link); + spin_lock_init(&lun->lun_tg_pt_gp_lock); + lun->lun_tpg = tpg; + + return lun; +} + +int core_tpg_add_lun( + struct se_portal_group *tpg, + struct se_lun *lun, + bool lun_access_ro, + struct se_device *dev) +{ + int ret; + + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0, + GFP_KERNEL); + if (ret < 0) + goto out; + + if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) && + !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) + target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); + + mutex_lock(&tpg->tpg_lun_mutex); + + spin_lock(&dev->se_port_lock); + lun->lun_index = dev->dev_index; + rcu_assign_pointer(lun->lun_se_dev, dev); + dev->export_count++; + list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); + spin_unlock(&dev->se_port_lock); + + if (dev->dev_flags & DF_READ_ONLY) + lun->lun_access_ro = true; + else + lun->lun_access_ro = lun_access_ro; + if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) + hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); + mutex_unlock(&tpg->tpg_lun_mutex); + + return 0; + +out: + return ret; +} + +void core_tpg_remove_lun( + struct se_portal_group *tpg, + struct se_lun *lun) +{ + /* + * rcu_dereference_raw protected by se_lun->lun_group symlink + * reference to se_device->dev_group. + */ + struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + + lun->lun_shutdown = true; + + core_clear_lun_from_tpg(lun, tpg); + /* + * Wait for any active I/O references to percpu se_lun->lun_ref to + * be released. Also, se_lun->lun_ref is now used by PR and ALUA + * logic when referencing a remote target port during ALL_TGT_PT=1 + * and generating UNIT_ATTENTIONs for ALUA access state transition. 
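+	 * (core_tpg_lun_ref_release() above completes lun_shutdown_comp
+	 * once the final percpu reference is gone, which is what the wait
+	 * inside transport_clear_lun_ref() relies on).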
+ */ + transport_clear_lun_ref(lun); + + mutex_lock(&tpg->tpg_lun_mutex); + if (lun->lun_se_dev) { + target_detach_tg_pt_gp(lun); + + spin_lock(&dev->se_port_lock); + list_del(&lun->lun_dev_link); + dev->export_count--; + rcu_assign_pointer(lun->lun_se_dev, NULL); + spin_unlock(&dev->se_port_lock); + } + if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) + hlist_del_rcu(&lun->link); + + lun->lun_shutdown = false; + mutex_unlock(&tpg->tpg_lun_mutex); + + percpu_ref_exit(&lun->lun_ref); +} diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c new file mode 100644 index 0000000000..0686882bcb --- /dev/null +++ b/drivers/target/target_core_transport.c @@ -0,0 +1,3670 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_transport.c + * + * This file contains the Generic Target Engine Core. + * + * (c) Copyright 2002-2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@kernel.org> + * + ******************************************************************************/ + +#include <linux/net.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/kthread.h> +#include <linux/in.h> +#include <linux/cdrom.h> +#include <linux/module.h> +#include <linux/ratelimit.h> +#include <linux/vmalloc.h> +#include <asm/unaligned.h> +#include <net/sock.h> +#include <net/tcp.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_alua.h" +#include "target_core_pr.h" +#include "target_core_ua.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/target.h> + +static struct workqueue_struct *target_completion_wq; +static struct workqueue_struct *target_submission_wq; +static struct kmem_cache *se_sess_cache; +struct kmem_cache *se_ua_cache; +struct kmem_cache *t10_pr_reg_cache; +struct kmem_cache *t10_alua_lu_gp_cache; +struct kmem_cache *t10_alua_lu_gp_mem_cache; +struct kmem_cache *t10_alua_tg_pt_gp_cache; +struct kmem_cache *t10_alua_lba_map_cache; +struct kmem_cache *t10_alua_lba_map_mem_cache; + +static void transport_complete_task_attr(struct se_cmd *cmd); +static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); +static void transport_handle_queue_full(struct se_cmd *cmd, + struct se_device *dev, int err, bool write_pending); +static void target_complete_ok_work(struct work_struct *work); + +int init_se_kmem_caches(void) +{ + se_sess_cache = kmem_cache_create("se_sess_cache", + sizeof(struct se_session), __alignof__(struct se_session), + 0, NULL); + if (!se_sess_cache) { + pr_err("kmem_cache_create() for struct se_session" + " failed\n"); + goto out; + } + se_ua_cache = kmem_cache_create("se_ua_cache", + sizeof(struct se_ua), __alignof__(struct se_ua), + 0, NULL); + if (!se_ua_cache) { + pr_err("kmem_cache_create() for struct se_ua failed\n"); + goto out_free_sess_cache; + } + t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", + sizeof(struct t10_pr_registration), + __alignof__(struct t10_pr_registration), 0, NULL); + if (!t10_pr_reg_cache) { + pr_err("kmem_cache_create() for struct t10_pr_registration" + " failed\n"); + goto out_free_ua_cache; + } + t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", + sizeof(struct t10_alua_lu_gp), 
__alignof__(struct t10_alua_lu_gp), + 0, NULL); + if (!t10_alua_lu_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" + " failed\n"); + goto out_free_pr_reg_cache; + } + t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", + sizeof(struct t10_alua_lu_gp_member), + __alignof__(struct t10_alua_lu_gp_member), 0, NULL); + if (!t10_alua_lu_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" + "cache failed\n"); + goto out_free_lu_gp_cache; + } + t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", + sizeof(struct t10_alua_tg_pt_gp), + __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); + if (!t10_alua_tg_pt_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" + "cache failed\n"); + goto out_free_lu_gp_mem_cache; + } + t10_alua_lba_map_cache = kmem_cache_create( + "t10_alua_lba_map_cache", + sizeof(struct t10_alua_lba_map), + __alignof__(struct t10_alua_lba_map), 0, NULL); + if (!t10_alua_lba_map_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_" + "cache failed\n"); + goto out_free_tg_pt_gp_cache; + } + t10_alua_lba_map_mem_cache = kmem_cache_create( + "t10_alua_lba_map_mem_cache", + sizeof(struct t10_alua_lba_map_member), + __alignof__(struct t10_alua_lba_map_member), 0, NULL); + if (!t10_alua_lba_map_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" + "cache failed\n"); + goto out_free_lba_map_cache; + } + + target_completion_wq = alloc_workqueue("target_completion", + WQ_MEM_RECLAIM, 0); + if (!target_completion_wq) + goto out_free_lba_map_mem_cache; + + target_submission_wq = alloc_workqueue("target_submission", + WQ_MEM_RECLAIM, 0); + if (!target_submission_wq) + goto out_free_completion_wq; + + return 0; + +out_free_completion_wq: + destroy_workqueue(target_completion_wq); +out_free_lba_map_mem_cache: + kmem_cache_destroy(t10_alua_lba_map_mem_cache); +out_free_lba_map_cache: + kmem_cache_destroy(t10_alua_lba_map_cache); +out_free_tg_pt_gp_cache: + kmem_cache_destroy(t10_alua_tg_pt_gp_cache); +out_free_lu_gp_mem_cache: + kmem_cache_destroy(t10_alua_lu_gp_mem_cache); +out_free_lu_gp_cache: + kmem_cache_destroy(t10_alua_lu_gp_cache); +out_free_pr_reg_cache: + kmem_cache_destroy(t10_pr_reg_cache); +out_free_ua_cache: + kmem_cache_destroy(se_ua_cache); +out_free_sess_cache: + kmem_cache_destroy(se_sess_cache); +out: + return -ENOMEM; +} + +void release_se_kmem_caches(void) +{ + destroy_workqueue(target_submission_wq); + destroy_workqueue(target_completion_wq); + kmem_cache_destroy(se_sess_cache); + kmem_cache_destroy(se_ua_cache); + kmem_cache_destroy(t10_pr_reg_cache); + kmem_cache_destroy(t10_alua_lu_gp_cache); + kmem_cache_destroy(t10_alua_lu_gp_mem_cache); + kmem_cache_destroy(t10_alua_tg_pt_gp_cache); + kmem_cache_destroy(t10_alua_lba_map_cache); + kmem_cache_destroy(t10_alua_lba_map_mem_cache); +} + +/* This code ensures unique mib indexes are handed out. 
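+ * For example, target_alloc_node_acl() in target_core_tpg.c draws its
+ * per-ACL statistics index from this pool:
+ *
+ *	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);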
*/ +static DEFINE_SPINLOCK(scsi_mib_index_lock); +static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; + +/* + * Allocate a new row index for the entry type specified + */ +u32 scsi_get_new_index(scsi_index_t type) +{ + u32 new_index; + + BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); + + spin_lock(&scsi_mib_index_lock); + new_index = ++scsi_mib_index[type]; + spin_unlock(&scsi_mib_index_lock); + + return new_index; +} + +void transport_subsystem_check_init(void) +{ + int ret; + static int sub_api_initialized; + + if (sub_api_initialized) + return; + + ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); + if (ret != 0) + pr_err("Unable to load target_core_iblock\n"); + + ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); + if (ret != 0) + pr_err("Unable to load target_core_file\n"); + + ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); + if (ret != 0) + pr_err("Unable to load target_core_pscsi\n"); + + ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); + if (ret != 0) + pr_err("Unable to load target_core_user\n"); + + sub_api_initialized = 1; +} + +static void target_release_cmd_refcnt(struct percpu_ref *ref) +{ + struct target_cmd_counter *cmd_cnt = container_of(ref, + typeof(*cmd_cnt), + refcnt); + wake_up(&cmd_cnt->refcnt_wq); +} + +struct target_cmd_counter *target_alloc_cmd_counter(void) +{ + struct target_cmd_counter *cmd_cnt; + int rc; + + cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL); + if (!cmd_cnt) + return NULL; + + init_completion(&cmd_cnt->stop_done); + init_waitqueue_head(&cmd_cnt->refcnt_wq); + atomic_set(&cmd_cnt->stopped, 0); + + rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0, + GFP_KERNEL); + if (rc) + goto free_cmd_cnt; + + return cmd_cnt; + +free_cmd_cnt: + kfree(cmd_cnt); + return NULL; +} +EXPORT_SYMBOL_GPL(target_alloc_cmd_counter); + +void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt) +{ + /* + * Drivers like loop do not call target_stop_session during session + * shutdown so we have to drop the ref taken at init time here. + */ + if (!atomic_read(&cmd_cnt->stopped)) + percpu_ref_put(&cmd_cnt->refcnt); + + percpu_ref_exit(&cmd_cnt->refcnt); + kfree(cmd_cnt); +} +EXPORT_SYMBOL_GPL(target_free_cmd_counter); + +/** + * transport_init_session - initialize a session object + * @se_sess: Session object pointer. + * + * The caller must have zero-initialized @se_sess before calling this function. + */ +void transport_init_session(struct se_session *se_sess) +{ + INIT_LIST_HEAD(&se_sess->sess_list); + INIT_LIST_HEAD(&se_sess->sess_acl_list); + spin_lock_init(&se_sess->sess_cmd_lock); +} +EXPORT_SYMBOL(transport_init_session); + +/** + * transport_alloc_session - allocate a session object and initialize it + * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. + */ +struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) +{ + struct se_session *se_sess; + + se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); + if (!se_sess) { + pr_err("Unable to allocate struct se_session from" + " se_sess_cache\n"); + return ERR_PTR(-ENOMEM); + } + transport_init_session(se_sess); + se_sess->sup_prot_ops = sup_prot_ops; + + return se_sess; +} +EXPORT_SYMBOL(transport_alloc_session); + +/** + * transport_alloc_session_tags - allocate target driver private data + * @se_sess: Session pointer. + * @tag_num: Maximum number of in-flight commands between initiator and target. 
+ * @tag_size: Size in bytes of the private data a target driver associates with + * each command. + */ +int transport_alloc_session_tags(struct se_session *se_sess, + unsigned int tag_num, unsigned int tag_size) +{ + int rc; + + se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num, + GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!se_sess->sess_cmd_map) { + pr_err("Unable to allocate se_sess->sess_cmd_map\n"); + return -ENOMEM; + } + + rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, + false, GFP_KERNEL, NUMA_NO_NODE); + if (rc < 0) { + pr_err("Unable to init se_sess->sess_tag_pool," + " tag_num: %u\n", tag_num); + kvfree(se_sess->sess_cmd_map); + se_sess->sess_cmd_map = NULL; + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(transport_alloc_session_tags); + +/** + * transport_init_session_tags - allocate a session and target driver private data + * @tag_num: Maximum number of in-flight commands between initiator and target. + * @tag_size: Size in bytes of the private data a target driver associates with + * each command. + * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. + */ +static struct se_session * +transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, + enum target_prot_op sup_prot_ops) +{ + struct se_session *se_sess; + int rc; + + if (tag_num != 0 && !tag_size) { + pr_err("init_session_tags called with percpu-ida tag_num:" + " %u, but zero tag_size\n", tag_num); + return ERR_PTR(-EINVAL); + } + if (!tag_num && tag_size) { + pr_err("init_session_tags called with percpu-ida tag_size:" + " %u, but zero tag_num\n", tag_size); + return ERR_PTR(-EINVAL); + } + + se_sess = transport_alloc_session(sup_prot_ops); + if (IS_ERR(se_sess)) + return se_sess; + + rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); + if (rc < 0) { + transport_free_session(se_sess); + return ERR_PTR(-ENOMEM); + } + + return se_sess; +} + +/* + * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. + */ +void __transport_register_session( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct se_session *se_sess, + void *fabric_sess_ptr) +{ + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; + unsigned char buf[PR_REG_ISID_LEN]; + unsigned long flags; + + se_sess->se_tpg = se_tpg; + se_sess->fabric_sess_ptr = fabric_sess_ptr; + /* + * Used by struct se_node_acl's under ConfigFS to locate active se_session-t + * + * Only set for struct se_session's that will actually be moving I/O. + * eg: *NOT* discovery sessions. + */ + if (se_nacl) { + /* + * + * Determine if fabric allows for T10-PI feature bits exposed to + * initiators for device backends with !dev->dev_attrib.pi_prot_type. + * + * If so, then always save prot_type on a per se_node_acl node + * basis and re-instate the previous sess_prot_type to avoid + * disabling PI from below any previously initiator side + * registered LUNs. + */ + if (se_nacl->saved_prot_type) + se_sess->sess_prot_type = se_nacl->saved_prot_type; + else if (tfo->tpg_check_prot_fabric_only) + se_sess->sess_prot_type = se_nacl->saved_prot_type = + tfo->tpg_check_prot_fabric_only(se_tpg); + /* + * If the fabric module supports an ISID based TransportID, + * save this value in binary from the fabric I_T Nexus now. 
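+		 * (e.g. iSCSI supplies its 6-byte ISID here; the value is
+		 * folded into sess_bin_isid below for later persistent
+		 * reservation TransportID comparisons).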
+ */ + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { + memset(&buf[0], 0, PR_REG_ISID_LEN); + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, + &buf[0], PR_REG_ISID_LEN); + se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); + } + + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); + /* + * The se_nacl->nacl_sess pointer will be set to the + * last active I_T Nexus for each struct se_node_acl. + */ + se_nacl->nacl_sess = se_sess; + + list_add_tail(&se_sess->sess_acl_list, + &se_nacl->acl_sess_list); + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); + } + list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); + + pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", + se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); +} +EXPORT_SYMBOL(__transport_register_session); + +void transport_register_session( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct se_session *se_sess, + void *fabric_sess_ptr) +{ + unsigned long flags; + + spin_lock_irqsave(&se_tpg->session_lock, flags); + __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); + spin_unlock_irqrestore(&se_tpg->session_lock, flags); +} +EXPORT_SYMBOL(transport_register_session); + +struct se_session * +target_setup_session(struct se_portal_group *tpg, + unsigned int tag_num, unsigned int tag_size, + enum target_prot_op prot_op, + const char *initiatorname, void *private, + int (*callback)(struct se_portal_group *, + struct se_session *, void *)) +{ + struct target_cmd_counter *cmd_cnt; + struct se_session *sess; + int rc; + + cmd_cnt = target_alloc_cmd_counter(); + if (!cmd_cnt) + return ERR_PTR(-ENOMEM); + /* + * If the fabric driver is using percpu-ida based pre allocation + * of I/O descriptor tags, go ahead and perform that setup now.. + */ + if (tag_num != 0) + sess = transport_init_session_tags(tag_num, tag_size, prot_op); + else + sess = transport_alloc_session(prot_op); + + if (IS_ERR(sess)) { + rc = PTR_ERR(sess); + goto free_cnt; + } + sess->cmd_cnt = cmd_cnt; + + sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, + (unsigned char *)initiatorname); + if (!sess->se_node_acl) { + rc = -EACCES; + goto free_sess; + } + /* + * Go ahead and perform any remaining fabric setup that is + * required before transport_register_session(). 
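+	 *
+	 * A hypothetical fabric caller (my_cmd, my_conn and my_login_cb
+	 * are illustrative names, not from this file):
+	 *
+	 *	sess = target_setup_session(se_tpg, 128, sizeof(struct my_cmd),
+	 *				    TARGET_PROT_NORMAL, initiatorname,
+	 *				    my_conn, my_login_cb);
+	 *	if (IS_ERR(sess))
+	 *		return PTR_ERR(sess);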
+ */ + if (callback != NULL) { + rc = callback(tpg, sess, private); + if (rc) + goto free_sess; + } + + transport_register_session(tpg, sess->se_node_acl, sess, private); + return sess; + +free_sess: + transport_free_session(sess); + return ERR_PTR(rc); + +free_cnt: + target_free_cmd_counter(cmd_cnt); + return ERR_PTR(rc); +} +EXPORT_SYMBOL(target_setup_session); + +ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) +{ + struct se_session *se_sess; + ssize_t len = 0; + + spin_lock_bh(&se_tpg->session_lock); + list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { + if (!se_sess->se_node_acl) + continue; + if (!se_sess->se_node_acl->dynamic_node_acl) + continue; + if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) + break; + + len += snprintf(page + len, PAGE_SIZE - len, "%s\n", + se_sess->se_node_acl->initiatorname); + len += 1; /* Include NULL terminator */ + } + spin_unlock_bh(&se_tpg->session_lock); + + return len; +} +EXPORT_SYMBOL(target_show_dynamic_sessions); + +static void target_complete_nacl(struct kref *kref) +{ + struct se_node_acl *nacl = container_of(kref, + struct se_node_acl, acl_kref); + struct se_portal_group *se_tpg = nacl->se_tpg; + + if (!nacl->dynamic_stop) { + complete(&nacl->acl_free_comp); + return; + } + + mutex_lock(&se_tpg->acl_node_mutex); + list_del_init(&nacl->acl_list); + mutex_unlock(&se_tpg->acl_node_mutex); + + core_tpg_wait_for_nacl_pr_ref(nacl); + core_free_device_list_for_node(nacl, se_tpg); + kfree(nacl); +} + +void target_put_nacl(struct se_node_acl *nacl) +{ + kref_put(&nacl->acl_kref, target_complete_nacl); +} +EXPORT_SYMBOL(target_put_nacl); + +void transport_deregister_session_configfs(struct se_session *se_sess) +{ + struct se_node_acl *se_nacl; + unsigned long flags; + /* + * Used by struct se_node_acl's under ConfigFS to locate active struct se_session + */ + se_nacl = se_sess->se_node_acl; + if (se_nacl) { + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); + if (!list_empty(&se_sess->sess_acl_list)) + list_del_init(&se_sess->sess_acl_list); + /* + * If the session list is empty, then clear the pointer. + * Otherwise, set the struct se_session pointer from the tail + * element of the per struct se_node_acl active session list. + */ + if (list_empty(&se_nacl->acl_sess_list)) + se_nacl->nacl_sess = NULL; + else { + se_nacl->nacl_sess = container_of( + se_nacl->acl_sess_list.prev, + struct se_session, sess_acl_list); + } + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); + } +} +EXPORT_SYMBOL(transport_deregister_session_configfs); + +void transport_free_session(struct se_session *se_sess) +{ + struct se_node_acl *se_nacl = se_sess->se_node_acl; + + /* + * Drop the se_node_acl->nacl_kref obtained from within + * core_tpg_get_initiator_node_acl(). + */ + if (se_nacl) { + struct se_portal_group *se_tpg = se_nacl->se_tpg; + const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; + unsigned long flags; + + se_sess->se_node_acl = NULL; + + /* + * Also determine if we need to drop the extra ->cmd_kref if + * it had been previously dynamically generated, and + * the endpoint is not caching dynamic ACLs. 
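+		 * Note: the extra reference being dropped here is the
+		 * se_node_acl->acl_kref taken by kref_get() in
+		 * core_tpg_check_initiator_node_acl() for dynamically
+		 * generated ACLs.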
+ */ + mutex_lock(&se_tpg->acl_node_mutex); + if (se_nacl->dynamic_node_acl && + !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); + if (list_empty(&se_nacl->acl_sess_list)) + se_nacl->dynamic_stop = true; + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); + + if (se_nacl->dynamic_stop) + list_del_init(&se_nacl->acl_list); + } + mutex_unlock(&se_tpg->acl_node_mutex); + + if (se_nacl->dynamic_stop) + target_put_nacl(se_nacl); + + target_put_nacl(se_nacl); + } + if (se_sess->sess_cmd_map) { + sbitmap_queue_free(&se_sess->sess_tag_pool); + kvfree(se_sess->sess_cmd_map); + } + if (se_sess->cmd_cnt) + target_free_cmd_counter(se_sess->cmd_cnt); + kmem_cache_free(se_sess_cache, se_sess); +} +EXPORT_SYMBOL(transport_free_session); + +static int target_release_res(struct se_device *dev, void *data) +{ + struct se_session *sess = data; + + if (dev->reservation_holder == sess) + target_release_reservation(dev); + return 0; +} + +void transport_deregister_session(struct se_session *se_sess) +{ + struct se_portal_group *se_tpg = se_sess->se_tpg; + unsigned long flags; + + if (!se_tpg) { + transport_free_session(se_sess); + return; + } + + spin_lock_irqsave(&se_tpg->session_lock, flags); + list_del(&se_sess->sess_list); + se_sess->se_tpg = NULL; + se_sess->fabric_sess_ptr = NULL; + spin_unlock_irqrestore(&se_tpg->session_lock, flags); + + /* + * Since the session is being removed, release SPC-2 + * reservations held by the session that is disappearing. + */ + target_for_each_device(target_release_res, se_sess); + + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", + se_tpg->se_tpg_tfo->fabric_name); + /* + * If last kref is dropping now for an explicit NodeACL, awake sleeping + * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group + * removal context from within transport_free_session() code. + * + * For dynamic ACL, target_put_nacl() uses target_complete_nacl() + * to release all remaining generate_node_acl=1 created ACL resources. + */ + + transport_free_session(se_sess); +} +EXPORT_SYMBOL(transport_deregister_session); + +void target_remove_session(struct se_session *se_sess) +{ + transport_deregister_session_configfs(se_sess); + transport_deregister_session(se_sess); +} +EXPORT_SYMBOL(target_remove_session); + +static void target_remove_from_state_list(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + if (!dev) + return; + + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); + if (cmd->state_active) { + list_del(&cmd->state_list); + cmd->state_active = false; + } + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); +} + +static void target_remove_from_tmr_list(struct se_cmd *cmd) +{ + struct se_device *dev = NULL; + unsigned long flags; + + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + dev = cmd->se_tmr_req->tmr_dev; + + if (dev) { + spin_lock_irqsave(&dev->se_tmr_lock, flags); + if (cmd->se_tmr_req->tmr_dev) + list_del_init(&cmd->se_tmr_req->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + } +} +/* + * This function is called by the target core after the target core has + * finished processing a SCSI command or SCSI TMF. Both the regular command + * processing code and the code for aborting commands can call this + * function. CMD_T_STOP is set if and only if another thread is waiting + * inside transport_wait_for_tasks() for t_transport_stop_comp. 
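+ * Returns 1 if the command was released back to the fabric or a
+ * CMD_T_STOP stop was signalled, and 0 otherwise, matching the
+ * TFO->check_stop_free() convention described in the function body.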
+ */ +static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. + */ + if (cmd->transport_state & CMD_T_STOP) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + complete_all(&cmd->t_transport_stop_comp); + return 1; + } + cmd->transport_state &= ~CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + /* + * Some fabric modules like tcm_loop can release their internally + * allocated I/O reference and struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the se_cmd being + * passed is released at this point, or zero if not being released. + */ + return cmd->se_tfo->check_stop_free(cmd); +} + +static void transport_lun_remove_cmd(struct se_cmd *cmd) +{ + struct se_lun *lun = cmd->se_lun; + + if (!lun) + return; + + target_remove_from_state_list(cmd); + target_remove_from_tmr_list(cmd); + + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); + + /* + * Clear struct se_cmd->se_lun before the handoff to FE. + */ + cmd->se_lun = NULL; +} + +static void target_complete_failure_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + + transport_generic_request_failure(cmd, cmd->sense_reason); +} + +/* + * Used when asking transport to copy Sense Data from the underlying + * Linux/SCSI struct scsi_cmnd + */ +static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + WARN_ON(!cmd->se_lun); + + if (!dev) + return NULL; + + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) + return NULL; + + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; + + pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); + return cmd->sense_buffer; +} + +void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) +{ + unsigned char *cmd_sense_buf; + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd_sense_buf = transport_get_sense_buffer(cmd); + if (!cmd_sense_buf) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + + cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; + memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); +} +EXPORT_SYMBOL(transport_copy_sense_to_cmd); + +static void target_handle_abort(struct se_cmd *cmd) +{ + bool tas = cmd->transport_state & CMD_T_TAS; + bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; + int ret; + + pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); + + if (tas) { + if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", + cmd->t_task_cdb[0], cmd->tag); + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) { + transport_handle_queue_full(cmd, cmd->se_dev, + ret, false); + return; + } + } else { + cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; + cmd->se_tfo->queue_tm_rsp(cmd); + } + } else { + /* + * Allow the fabric driver to unmap any resources before + * releasing the descriptor via TFO->release_cmd(). 
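+		 * (aborted_task() is the fabric's final chance to unmap
+		 * hardware resources before the kref put below can free
+		 * the descriptor).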
+ */ + cmd->se_tfo->aborted_task(cmd); + if (ack_kref) + WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); + /* + * To do: establish a unit attention condition on the I_T + * nexus associated with cmd. See also the paragraph "Aborting + * commands" in SAM. + */ + } + + WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); + + transport_lun_remove_cmd(cmd); + + transport_cmd_check_stop_to_fabric(cmd); +} + +static void target_abort_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + + target_handle_abort(cmd); +} + +static bool target_cmd_interrupted(struct se_cmd *cmd) +{ + int post_ret; + + if (cmd->transport_state & CMD_T_ABORTED) { + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd, false, &post_ret); + INIT_WORK(&cmd->work, target_abort_work); + queue_work(target_completion_wq, &cmd->work); + return true; + } else if (cmd->transport_state & CMD_T_STOP) { + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd, false, &post_ret); + complete_all(&cmd->t_transport_stop_comp); + return true; + } + + return false; +} + +/* May be called from interrupt context so must not sleep. */ +void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status, + sense_reason_t sense_reason) +{ + struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; + int success, cpu; + unsigned long flags; + + if (target_cmd_interrupted(cmd)) + return; + + cmd->scsi_status = scsi_status; + cmd->sense_reason = sense_reason; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + switch (cmd->scsi_status) { + case SAM_STAT_CHECK_CONDITION: + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + success = 1; + else + success = 0; + break; + default: + success = 1; + break; + } + + cmd->t_state = TRANSPORT_COMPLETE; + cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + INIT_WORK(&cmd->work, success ? target_complete_ok_work : + target_complete_failure_work); + + if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) + cpu = cmd->cpuid; + else + cpu = wwn->cmd_compl_affinity; + + queue_work_on(cpu, target_completion_wq, &cmd->work); +} +EXPORT_SYMBOL(target_complete_cmd_with_sense); + +void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) +{ + target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ? 
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE : + TCM_NO_SENSE); +} +EXPORT_SYMBOL(target_complete_cmd); + +void target_set_cmd_data_length(struct se_cmd *cmd, int length) +{ + if (length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = cmd->data_length - length; + } + + cmd->data_length = length; + } +} +EXPORT_SYMBOL(target_set_cmd_data_length); + +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD || + cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { + target_set_cmd_data_length(cmd, length); + } + + target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); + +static void target_add_to_state_list(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); + if (!cmd->state_active) { + list_add_tail(&cmd->state_list, + &dev->queues[cmd->cpuid].state_list); + cmd->state_active = true; + } + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); +} + +/* + * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status + */ +static void transport_write_pending_qf(struct se_cmd *cmd); +static void transport_complete_qf(struct se_cmd *cmd); + +void target_qf_do_work(struct work_struct *work) +{ + struct se_device *dev = container_of(work, struct se_device, + qf_work_queue); + LIST_HEAD(qf_cmd_list); + struct se_cmd *cmd, *cmd_tmp; + + spin_lock_irq(&dev->qf_cmd_lock); + list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); + spin_unlock_irq(&dev->qf_cmd_lock); + + list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { + list_del(&cmd->se_qf_node); + atomic_dec_mb(&dev->dev_qf_count); + + pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" + " context: %s\n", cmd->se_tfo->fabric_name, cmd, + (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : + (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" + : "UNKNOWN"); + + if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) + transport_write_pending_qf(cmd); + else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || + cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) + transport_complete_qf(cmd); + } +} + +unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) +{ + switch (cmd->data_direction) { + case DMA_NONE: + return "NONE"; + case DMA_FROM_DEVICE: + return "READ"; + case DMA_TO_DEVICE: + return "WRITE"; + case DMA_BIDIRECTIONAL: + return "BIDI"; + default: + break; + } + + return "UNKNOWN"; +} + +void transport_dump_dev_state( + struct se_device *dev, + char *b, + int *bl) +{ + *bl += sprintf(b + *bl, "Status: "); + if (dev->export_count) + *bl += sprintf(b + *bl, "ACTIVATED"); + else + *bl += sprintf(b + *bl, "DEACTIVATED"); + + *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); + *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", + dev->dev_attrib.block_size, + dev->dev_attrib.hw_max_sectors); + *bl += sprintf(b + *bl, " "); +} + +void transport_dump_vpd_proto_id( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int len; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + len = sprintf(buf, "T10 VPD Protocol Identifier: "); + + switch (vpd->protocol_identifier) { + case 0x00: + sprintf(buf+len, "Fibre Channel\n"); + break; + case 0x10: + sprintf(buf+len, "Parallel SCSI\n"); + break; + case 0x20: + sprintf(buf+len, "SSA\n"); + break; + case 0x30: + sprintf(buf+len, "IEEE 1394\n"); + break; + case 0x40: + sprintf(buf+len, "SCSI Remote Direct Memory Access" + " Protocol\n"); + break; + case 0x50: + sprintf(buf+len, "Internet SCSI (iSCSI)\n"); + break; + case 0x60: + sprintf(buf+len, "SAS Serial SCSI Protocol\n"); + break; + case 0x70: + sprintf(buf+len, "Automation/Drive Interface Transport" + " Protocol\n"); + break; + case 0x80: + sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); + break; + default: + sprintf(buf+len, "Unknown 0x%02x\n", + vpd->protocol_identifier); + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + pr_debug("%s", buf); +} + +void +transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) +{ + /* + * Check if the Protocol Identifier Valid (PIV) bit is set.. + * + * from spc3r23.pdf section 7.5.1 + */ + if (page_83[1] & 0x80) { + vpd->protocol_identifier = (page_83[0] & 0xf0); + vpd->protocol_identifier_set = 1; + transport_dump_vpd_proto_id(vpd, NULL, 0); + } +} +EXPORT_SYMBOL(transport_set_vpd_proto_id); + +int transport_dump_vpd_assoc( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int ret = 0; + int len; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + len = sprintf(buf, "T10 VPD Identifier Association: "); + + switch (vpd->association) { + case 0x00: + sprintf(buf+len, "addressed logical unit\n"); + break; + case 0x10: + sprintf(buf+len, "target port\n"); + break; + case 0x20: + sprintf(buf+len, "SCSI target device\n"); + break; + default: + sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); + ret = -EINVAL; + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + pr_debug("%s", buf); + + return ret; +} + +int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) +{ + /* + * The VPD identification association.. 
+ * + * from spc3r23.pdf Section 7.6.3.1 Table 297 + */ + vpd->association = (page_83[1] & 0x30); + return transport_dump_vpd_assoc(vpd, NULL, 0); +} +EXPORT_SYMBOL(transport_set_vpd_assoc); + +int transport_dump_vpd_ident_type( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int ret = 0; + int len; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + len = sprintf(buf, "T10 VPD Identifier Type: "); + + switch (vpd->device_identifier_type) { + case 0x00: + sprintf(buf+len, "Vendor specific\n"); + break; + case 0x01: + sprintf(buf+len, "T10 Vendor ID based\n"); + break; + case 0x02: + sprintf(buf+len, "EUI-64 based\n"); + break; + case 0x03: + sprintf(buf+len, "NAA\n"); + break; + case 0x04: + sprintf(buf+len, "Relative target port identifier\n"); + break; + case 0x08: + sprintf(buf+len, "SCSI name string\n"); + break; + default: + sprintf(buf+len, "Unsupported: 0x%02x\n", + vpd->device_identifier_type); + ret = -EINVAL; + break; + } + + if (p_buf) { + if (p_buf_len < strlen(buf)+1) + return -EINVAL; + strncpy(p_buf, buf, p_buf_len); + } else { + pr_debug("%s", buf); + } + + return ret; +} + +int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) +{ + /* + * The VPD identifier type.. + * + * from spc3r23.pdf Section 7.6.3.1 Table 298 + */ + vpd->device_identifier_type = (page_83[1] & 0x0f); + return transport_dump_vpd_ident_type(vpd, NULL, 0); +} +EXPORT_SYMBOL(transport_set_vpd_ident_type); + +int transport_dump_vpd_ident( + struct t10_vpd *vpd, + unsigned char *p_buf, + int p_buf_len) +{ + unsigned char buf[VPD_TMP_BUF_SIZE]; + int ret = 0; + + memset(buf, 0, VPD_TMP_BUF_SIZE); + + switch (vpd->device_identifier_code_set) { + case 0x01: /* Binary */ + snprintf(buf, sizeof(buf), + "T10 VPD Binary Device Identifier: %s\n", + &vpd->device_identifier[0]); + break; + case 0x02: /* ASCII */ + snprintf(buf, sizeof(buf), + "T10 VPD ASCII Device Identifier: %s\n", + &vpd->device_identifier[0]); + break; + case 0x03: /* UTF-8 */ + snprintf(buf, sizeof(buf), + "T10 VPD UTF-8 Device Identifier: %s\n", + &vpd->device_identifier[0]); + break; + default: + sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" + " 0x%02x", vpd->device_identifier_code_set); + ret = -EINVAL; + break; + } + + if (p_buf) + strncpy(p_buf, buf, p_buf_len); + else + pr_debug("%s", buf); + + return ret; +} + +int +transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) +{ + static const char hex_str[] = "0123456789abcdef"; + int j = 0, i = 4; /* offset to start of the identifier */ + + /* + * The VPD Code Set (encoding) + * + * from spc3r23.pdf Section 7.6.3.1 Table 296 + */ + vpd->device_identifier_code_set = (page_83[0] & 0x0f); + switch (vpd->device_identifier_code_set) { + case 0x01: /* Binary */ + vpd->device_identifier[j++] = + hex_str[vpd->device_identifier_type]; + while (i < (4 + page_83[3])) { + vpd->device_identifier[j++] = + hex_str[(page_83[i] & 0xf0) >> 4]; + vpd->device_identifier[j++] = + hex_str[page_83[i] & 0x0f]; + i++; + } + break; + case 0x02: /* ASCII */ + case 0x03: /* UTF-8 */ + while (i < (4 + page_83[3])) + vpd->device_identifier[j++] = page_83[i++]; + break; + default: + break; + } + + return transport_dump_vpd_ident(vpd, NULL, 0); +} +EXPORT_SYMBOL(transport_set_vpd_ident); + +static sense_reason_t +target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, + unsigned int size) +{ + u32 mtl; + + if (!cmd->se_tfo->max_data_sg_nents) + return TCM_NO_SENSE; + /* + * Check if fabric enforced maximum 
SGL entries per I/O descriptor + * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + + * residual_count and reduce original cmd->data_length to maximum + * length based on single PAGE_SIZE entry scatter-lists. + */ + mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); + if (cmd->data_length > mtl) { + /* + * If an existing CDB overflow is present, calculate new residual + * based on CDB size minus fabric maximum transfer length. + * + * If an existing CDB underflow is present, calculate new residual + * based on original cmd->data_length minus fabric maximum transfer + * length. + * + * Otherwise, set the underflow residual based on cmd->data_length + * minus fabric maximum transfer length. + */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + cmd->residual_count = (size - mtl); + } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + u32 orig_dl = size + cmd->residual_count; + cmd->residual_count = (orig_dl - mtl); + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = (cmd->data_length - mtl); + } + cmd->data_length = mtl; + /* + * Reset sbc_check_prot() calculated protection payload + * length based upon the new smaller MTL. + */ + if (cmd->prot_length) { + u32 sectors = (mtl / dev->dev_attrib.block_size); + cmd->prot_length = dev->prot_length * sectors; + } + } + return TCM_NO_SENSE; +} + +/** + * target_cmd_size_check - Check whether there will be a residual. + * @cmd: SCSI command. + * @size: Data buffer size derived from CDB. The data buffer size provided by + * the SCSI transport driver is available in @cmd->data_length. + * + * Compare the data buffer size from the CDB with the data buffer limit from the transport + * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. + * + * Note: target drivers set @cmd->data_length by calling __target_init_cmd(). + * + * Return: TCM_NO_SENSE + */ +sense_reason_t +target_cmd_size_check(struct se_cmd *cmd, unsigned int size) +{ + struct se_device *dev = cmd->se_dev; + + if (cmd->unknown_data_length) { + cmd->data_length = size; + } else if (size != cmd->data_length) { + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" + " %u does not match SCSI CDB Length: %u for SAM Opcode:" + " 0x%02x\n", cmd->se_tfo->fabric_name, + cmd->data_length, size, cmd->t_task_cdb[0]); + /* + * For READ command for the overflow case keep the existing + * fabric provided ->data_length. Otherwise for the underflow + * case, reset ->data_length to the smaller SCSI expected data + * transfer length. + */ + if (size > cmd->data_length) { + cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; + cmd->residual_count = (size - cmd->data_length); + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = (cmd->data_length - size); + /* + * Do not truncate ->data_length for WRITE command to + * dump all payload + */ + if (cmd->data_direction == DMA_FROM_DEVICE) { + cmd->data_length = size; + } + } + + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { + pr_err_ratelimited("Rejecting underflow/overflow" + " for WRITE data CDB\n"); + return TCM_INVALID_FIELD_IN_COMMAND_IU; + } + /* + * Some fabric drivers like iscsi-target still expect to + * always reject overflow writes. Reject this case until + * full fabric driver level support for overflow writes + * is introduced tree-wide. 
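[Annotation] A worked example of the residual rules above, with illustrative numbers: a READ(10) whose CDB requests 8 blocks of 512 bytes gives size = 4096. If the fabric announced data_length = 2048, then size > data_length, so SCF_OVERFLOW_BIT is set and residual_count = 4096 - 2048 = 2048, while data_length stays 2048 for the READ. If instead the fabric announced data_length = 8192, SCF_UNDERFLOW_BIT is set, residual_count = 8192 - 4096 = 4096, and data_length is reduced to 4096.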
+ */ + if (size > cmd->data_length) { + pr_err_ratelimited("Rejecting overflow for" + " WRITE control CDB\n"); + return TCM_INVALID_CDB_FIELD; + } + } + } + + return target_check_max_data_sg_nents(cmd, dev, size); + +} + +/* + * Used by fabric modules containing a local struct se_cmd within their + * fabric dependent per I/O descriptor. + * + * Preserves the value of @cmd->tag. + */ +void __target_init_cmd(struct se_cmd *cmd, + const struct target_core_fabric_ops *tfo, + struct se_session *se_sess, u32 data_length, + int data_direction, int task_attr, + unsigned char *sense_buffer, u64 unpacked_lun, + struct target_cmd_counter *cmd_cnt) +{ + INIT_LIST_HEAD(&cmd->se_delayed_node); + INIT_LIST_HEAD(&cmd->se_qf_node); + INIT_LIST_HEAD(&cmd->state_list); + init_completion(&cmd->t_transport_stop_comp); + cmd->free_compl = NULL; + cmd->abrt_compl = NULL; + spin_lock_init(&cmd->t_state_lock); + INIT_WORK(&cmd->work, NULL); + kref_init(&cmd->cmd_kref); + + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; + cmd->se_tfo = tfo; + cmd->se_sess = se_sess; + cmd->data_length = data_length; + cmd->data_direction = data_direction; + cmd->sam_task_attr = task_attr; + cmd->sense_buffer = sense_buffer; + cmd->orig_fe_lun = unpacked_lun; + cmd->cmd_cnt = cmd_cnt; + + if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) + cmd->cpuid = raw_smp_processor_id(); + + cmd->state_active = false; +} +EXPORT_SYMBOL(__target_init_cmd); + +static sense_reason_t +transport_check_alloc_task_attr(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + /* + * Check if SAM Task Attribute emulation is enabled for this + * struct se_device storage object + */ + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + return 0; + + if (cmd->sam_task_attr == TCM_ACA_TAG) { + pr_debug("SAM Task Attribute ACA" + " emulation is not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + return 0; +} + +sense_reason_t +target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) +{ + sense_reason_t ret; + + /* + * Ensure that the received CDB is less than the max (252 + 8) bytes + * for VARIABLE_LENGTH_CMD + */ + if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { + pr_err("Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); + ret = TCM_INVALID_CDB_FIELD; + goto err; + } + /* + * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, + * allocate the additional extended CDB buffer now.. Otherwise + * setup the pointer from __t_task_cdb to t_task_cdb. + */ + if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { + cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); + if (!cmd->t_task_cdb) { + pr_err("Unable to allocate cmd->t_task_cdb" + " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", + scsi_command_size(cdb), + (unsigned long)sizeof(cmd->__t_task_cdb)); + ret = TCM_OUT_OF_RESOURCES; + goto err; + } + } + /* + * Copy the original CDB into cmd-> + */ + memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); + + trace_target_sequencer_start(cmd); + return 0; + +err: + /* + * Copy the CDB here to allow trace_target_cmd_complete() to + * print the cdb to the trace buffers. 
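[Annotation] To make the sizing above concrete (assuming the usual TCM_MAX_COMMAND_SIZE of 32 bytes): a 16-byte READ(16) CDB lands in the embedded __t_task_cdb[] array, while a 64-byte VARIABLE LENGTH CDB takes the kzalloc() path. The error-path copy just below is clamped with min() precisely because that allocation may not have happened yet.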
+ */ + memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), + (unsigned int)TCM_MAX_COMMAND_SIZE)); + return ret; +} +EXPORT_SYMBOL(target_cmd_init_cdb); + +sense_reason_t +target_cmd_parse_cdb(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + sense_reason_t ret; + + ret = dev->transport->parse_cdb(cmd); + if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) + pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", + cmd->se_tfo->fabric_name, + cmd->se_sess->se_node_acl->initiatorname, + cmd->t_task_cdb[0]); + if (ret) + return ret; + + ret = transport_check_alloc_task_attr(cmd); + if (ret) + return ret; + + cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; + atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); + return 0; +} +EXPORT_SYMBOL(target_cmd_parse_cdb); + +/* + * Used by fabric module frontends to queue tasks directly. + * May only be used from process context. + */ +int transport_handle_cdb_direct( + struct se_cmd *cmd) +{ + sense_reason_t ret; + + might_sleep(); + + if (!cmd->se_lun) { + dump_stack(); + pr_err("cmd->se_lun is NULL\n"); + return -EINVAL; + } + + /* + * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that + * outstanding descriptors are handled correctly during shutdown via + * transport_wait_for_tasks() + * + * Also, we don't take cmd->t_state_lock here as we only expect + * this to be called for initial descriptor submission. + */ + cmd->t_state = TRANSPORT_NEW_CMD; + cmd->transport_state |= CMD_T_ACTIVE; + + /* + * transport_generic_new_cmd() is already handling QUEUE_FULL, + * so follow TRANSPORT_NEW_CMD processing thread context usage + * and call transport_generic_request_failure() if necessary.. + */ + ret = transport_generic_new_cmd(cmd); + if (ret) + transport_generic_request_failure(cmd, ret); + return 0; +} +EXPORT_SYMBOL(transport_handle_cdb_direct); + +sense_reason_t +transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) +{ + if (!sgl || !sgl_count) + return 0; + + /* + * Reject SCSI data overflow with map_mem_to_cmd() as incoming + * scatterlists already have been set to follow what the fabric + * passes for the original expected data transfer length. + */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + pr_warn("Rejecting SCSI DATA overflow for fabric using" + " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); + return TCM_INVALID_CDB_FIELD; + } + + cmd->t_data_sg = sgl; + cmd->t_data_nents = sgl_count; + cmd->t_bidi_data_sg = sgl_bidi; + cmd->t_bidi_data_nents = sgl_bidi_count; + + cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + return 0; +} + +/** + * target_init_cmd - initialize se_cmd + * @se_cmd: command descriptor to init + * @se_sess: associated se_sess for endpoint + * @sense: pointer to SCSI sense buffer + * @unpacked_lun: unpacked LUN to reference for struct se_lun + * @data_length: fabric expected data transfer length + * @task_attr: SAM task attribute + * @data_dir: DMA data direction + * @flags: flags for command submission from target_sc_flags_tables + * + * Task tags are supported if the caller has set @se_cmd->tag. + * + * Returns: + * - less than zero to signal active I/O shutdown failure. + * - zero on success. + * + * If the fabric driver calls target_stop_session, then it must check the + * return code and handle failures. This will never fail for other drivers, + * and the return code can be ignored. 
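[Annotation] target_init_cmd(), target_submit_prep() and target_submit() below together form the three-step submission path. A minimal caller sketch, with every my_* name hypothetical and error handling reduced to the return-code contracts documented here:

static void my_fabric_queue_scsi_cmd(struct my_cmd *mc)
{
	struct se_cmd *se_cmd = &mc->se_cmd;

	if (target_init_cmd(se_cmd, mc->se_sess, mc->sense_buf,
			    mc->unpacked_lun, mc->data_len, TCM_SIMPLE_TAG,
			    mc->data_dir, TARGET_SCF_ACK_KREF) < 0)
		return;	/* session was being stopped */

	if (target_submit_prep(se_cmd, mc->cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;	/* status has already been queued for us */

	target_submit(se_cmd);
}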
+ */
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u64 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
+{
+ struct se_portal_group *se_tpg;
+
+ se_tpg = se_sess->se_tpg;
+ BUG_ON(!se_tpg);
+ BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+
+ if (flags & TARGET_SCF_USE_CPUID)
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+ /*
+ * Signal bidirectional data payloads to target-core
+ */
+ if (flags & TARGET_SCF_BIDI_OP)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
+ if (flags & TARGET_SCF_UNKNOWN_SIZE)
+ se_cmd->unknown_data_length = 1;
+ /*
+ * Initialize se_cmd for target operation. From this point
+ * exceptions are handled by sending exception status via
+ * target_core_fabric_ops->queue_status() callback
+ */
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+ data_dir, task_attr, sense, unpacked_lun,
+ se_sess->cmd_cnt);
+
+ /*
+ * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
+ * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * kref_put() to happen during fabric packet acknowledgement.
+ */
+ return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+}
+EXPORT_SYMBOL_GPL(target_init_cmd);
+
+/**
+ * target_submit_prep - prepare cmd for submission
+ * @se_cmd: command descriptor to prep
+ * @cdb: pointer to SCSI CDB
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: gfp allocation type
+ *
+ * Returns:
+ * - less than zero to signal failure.
+ * - zero on success.
+ *
+ * If failure is returned, lio will use the caller's queue_status to complete
+ * the cmd.
+ */
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count,
+ gfp_t gfp)
+{
+ sense_reason_t rc;
+
+ rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
+ if (rc)
+ goto send_cc_direct;
+
+ /*
+ * Locate se_lun pointer and attach it to struct se_cmd
+ */
+ rc = transport_lookup_cmd_lun(se_cmd);
+ if (rc)
+ goto send_cc_direct;
+
+ rc = target_cmd_parse_cdb(se_cmd);
+ if (rc != 0)
+ goto generic_fail;
+
+ /*
+ * Save pointers for SGLs containing protection information,
+ * if present.
+ */
+ if (sgl_prot_count) {
+ se_cmd->t_prot_sg = sgl_prot;
+ se_cmd->t_prot_nents = sgl_prot_count;
+ se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
+ }
+
+ /*
+ * When a non-zero sgl_count has been passed perform SGL passthrough
+ * mapping for pre-allocated fabric memory instead of having target
+ * core perform an internal SGL allocation..
+ */
+ if (sgl_count != 0) {
+ BUG_ON(!sgl);
+
+ rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+ sgl_bidi, sgl_bidi_count);
+ if (rc != 0)
+ goto generic_fail;
+ }
+
+ return 0;
+
+send_cc_direct:
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
+ target_put_sess_cmd(se_cmd);
+ return -EIO;
+
+generic_fail:
+ transport_generic_request_failure(se_cmd, rc);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(target_submit_prep);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep must have been called on the cmd, and this must be
+ * called from process context.
+ */
+void target_submit(struct se_cmd *se_cmd)
+{
+ struct scatterlist *sgl = se_cmd->t_data_sg;
+ unsigned char *buf = NULL;
+
+ might_sleep();
+
+ if (se_cmd->t_data_nents != 0) {
+ BUG_ON(!sgl);
+ /*
+ * A work-around for tcm_loop as some userspace code via
+ * scsi-generic does not memset its associated read buffers,
+ * so go ahead and do that here for type non-data CDBs. Also
+ * note that this is currently guaranteed to be a single SGL
+ * for this case by target core in target_setup_cmd_from_cdb()
+ * -> transport_generic_cmd_sequencer().
+ */
+ if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+ se_cmd->data_direction == DMA_FROM_DEVICE) {
+ if (sgl)
+ buf = kmap(sg_page(sgl)) + sgl->offset;
+
+ if (buf) {
+ memset(buf, 0, sgl->length);
+ kunmap(sg_page(sgl));
+ }
+ }
+
+ }
+
+ /*
+ * Check if we need to delay processing because of ALUA
+ * Active/NonOptimized primary access state..
+ */
+ core_alua_check_nonop_delay(se_cmd);
+
+ transport_handle_cdb_direct(se_cmd);
+}
+EXPORT_SYMBOL_GPL(target_submit);
+
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ *
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ *
+ * It also assumes internal target core SGL memory allocation.
+ *
+ * This function must only be used by drivers that do their own
+ * sync during shutdown and do not use target_stop_session. If there
+ * is a failure this function will call into the fabric driver's
+ * queue_status with a CHECK_CONDITION.
+ */
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
+{
+ int rc;
+
+ rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
+ task_attr, data_dir, flags);
+ WARN(rc, "Invalid target_submit_cmd use. 
Driver must not use target_stop_session or call target_init_cmd directly.\n"); + if (rc) + return; + + if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, + GFP_KERNEL)) + return; + + target_submit(se_cmd); +} +EXPORT_SYMBOL(target_submit_cmd); + + +static struct se_dev_plug *target_plug_device(struct se_device *se_dev) +{ + struct se_dev_plug *se_plug; + + if (!se_dev->transport->plug_device) + return NULL; + + se_plug = se_dev->transport->plug_device(se_dev); + if (!se_plug) + return NULL; + + se_plug->se_dev = se_dev; + /* + * We have a ref to the lun at this point, but the cmds could + * complete before we unplug, so grab a ref to the se_device so we + * can call back into the backend. + */ + config_group_get(&se_dev->dev_group); + return se_plug; +} + +static void target_unplug_device(struct se_dev_plug *se_plug) +{ + struct se_device *se_dev = se_plug->se_dev; + + se_dev->transport->unplug_device(se_plug); + config_group_put(&se_dev->dev_group); +} + +void target_queued_submit_work(struct work_struct *work) +{ + struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work); + struct se_cmd *se_cmd, *next_cmd; + struct se_dev_plug *se_plug = NULL; + struct se_device *se_dev = NULL; + struct llist_node *cmd_list; + + cmd_list = llist_del_all(&sq->cmd_list); + if (!cmd_list) + /* Previous call took what we were queued to submit */ + return; + + cmd_list = llist_reverse_order(cmd_list); + llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) { + if (!se_dev) { + se_dev = se_cmd->se_dev; + se_plug = target_plug_device(se_dev); + } + + target_submit(se_cmd); + } + + if (se_plug) + target_unplug_device(se_plug); +} + +/** + * target_queue_submission - queue the cmd to run on the LIO workqueue + * @se_cmd: command descriptor to submit + */ +void target_queue_submission(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + int cpu = se_cmd->cpuid; + struct se_cmd_queue *sq; + + sq = &se_dev->queues[cpu].sq; + llist_add(&se_cmd->se_cmd_list, &sq->cmd_list); + queue_work_on(cpu, target_submission_wq, &sq->work); +} +EXPORT_SYMBOL_GPL(target_queue_submission); + +static void target_complete_tmr_failure(struct work_struct *work) +{ + struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); + + se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; + se_cmd->se_tfo->queue_tm_rsp(se_cmd); + + transport_lun_remove_cmd(se_cmd); + transport_cmd_check_stop_to_fabric(se_cmd); +} + +/** + * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd + * for TMR CDBs + * + * @se_cmd: command descriptor to submit + * @se_sess: associated se_sess for endpoint + * @sense: pointer to SCSI sense buffer + * @unpacked_lun: unpacked LUN to reference for struct se_lun + * @fabric_tmr_ptr: fabric context for TMR req + * @tm_type: Type of TM request + * @gfp: gfp type for caller + * @tag: referenced task tag for TMR_ABORT_TASK + * @flags: submit cmd flags + * + * Callable from all contexts. + **/ + +int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, + unsigned char *sense, u64 unpacked_lun, + void *fabric_tmr_ptr, unsigned char tm_type, + gfp_t gfp, u64 tag, int flags) +{ + struct se_portal_group *se_tpg; + int ret; + + se_tpg = se_sess->se_tpg; + BUG_ON(!se_tpg); + + __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, + 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun, + se_sess->cmd_cnt); + /* + * FIXME: Currently expect caller to handle se_cmd->se_tmr_req + * allocation failure. 
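[Annotation] For context while reading target_submit_tmr(): a fabric abort handler built on it might look like the following sketch (all my_* names hypothetical; the flags value follows the kref commentary earlier in this file):

static void my_fabric_handle_abort(struct my_conn *conn, u64 ref_tag)
{
	struct se_cmd *se_cmd = &conn->tmr_se_cmd;

	if (target_submit_tmr(se_cmd, conn->se_sess, conn->tmr_sense_buf,
			      conn->unpacked_lun, NULL, TMR_ABORT_TASK,
			      GFP_KERNEL, ref_tag, TARGET_SCF_ACK_KREF) < 0)
		my_fabric_reject_tmr(conn);	/* hypothetical error path */
}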
+ */ + ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); + if (ret < 0) + return -ENOMEM; + + if (tm_type == TMR_ABORT_TASK) + se_cmd->se_tmr_req->ref_task_tag = tag; + + /* See target_submit_cmd for commentary */ + ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); + if (ret) { + core_tmr_release_req(se_cmd->se_tmr_req); + return ret; + } + + ret = transport_lookup_tmr_lun(se_cmd); + if (ret) + goto failure; + + transport_generic_handle_tmr(se_cmd); + return 0; + + /* + * For callback during failure handling, push this work off + * to process context with TMR_LUN_DOES_NOT_EXIST status. + */ +failure: + INIT_WORK(&se_cmd->work, target_complete_tmr_failure); + schedule_work(&se_cmd->work); + return 0; +} +EXPORT_SYMBOL(target_submit_tmr); + +/* + * Handle SAM-esque emulation for generic transport request failures. + */ +void transport_generic_request_failure(struct se_cmd *cmd, + sense_reason_t sense_reason) +{ + int ret = 0, post_ret; + + pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", + sense_reason); + target_show_cmd("-----[ ", cmd); + + /* + * For SAM Task Attribute emulation for failed struct se_cmd + */ + transport_complete_task_attr(cmd); + + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd, false, &post_ret); + + if (cmd->transport_state & CMD_T_ABORTED) { + INIT_WORK(&cmd->work, target_abort_work); + queue_work(target_completion_wq, &cmd->work); + return; + } + + switch (sense_reason) { + case TCM_NON_EXISTENT_LUN: + case TCM_UNSUPPORTED_SCSI_OPCODE: + case TCM_INVALID_CDB_FIELD: + case TCM_INVALID_PARAMETER_LIST: + case TCM_PARAMETER_LIST_LENGTH_ERROR: + case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: + case TCM_UNKNOWN_MODE_PAGE: + case TCM_WRITE_PROTECTED: + case TCM_ADDRESS_OUT_OF_RANGE: + case TCM_CHECK_CONDITION_ABORT_CMD: + case TCM_CHECK_CONDITION_UNIT_ATTENTION: + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: + case TCM_TOO_MANY_TARGET_DESCS: + case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: + case TCM_TOO_MANY_SEGMENT_DESCS: + case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: + case TCM_INVALID_FIELD_IN_COMMAND_IU: + case TCM_ALUA_TG_PT_STANDBY: + case TCM_ALUA_TG_PT_UNAVAILABLE: + case TCM_ALUA_STATE_TRANSITION: + case TCM_ALUA_OFFLINE: + break; + case TCM_OUT_OF_RESOURCES: + cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + goto queue_status; + case TCM_LUN_BUSY: + cmd->scsi_status = SAM_STAT_BUSY; + goto queue_status; + case TCM_RESERVATION_CONFLICT: + /* + * No SENSE Data payload for this case, set SCSI Status + * and queue the response to $FABRIC_MOD. + * + * Uses linux/include/scsi/scsi.h SAM status codes defs + */ + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + /* + * For UA Interlock Code 11b, a RESERVATION CONFLICT will + * establish a UNIT ATTENTION with PREVIOUS RESERVATION + * CONFLICT STATUS. 
+ * + * See spc4r17, section 7.4.6 Control Mode Page, Table 349 + */ + if (cmd->se_sess && + cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl + == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { + target_ua_allocate_lun(cmd->se_sess->se_node_acl, + cmd->orig_fe_lun, 0x2C, + ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); + } + + goto queue_status; + default: + pr_err("Unknown transport error for CDB 0x%02x: %d\n", + cmd->t_task_cdb[0], sense_reason); + sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + break; + } + + ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); + if (ret) + goto queue_full; + +check_stop: + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + +queue_status: + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (!ret) + goto check_stop; +queue_full: + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); +} +EXPORT_SYMBOL(transport_generic_request_failure); + +void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) +{ + sense_reason_t ret; + + if (!cmd->execute_cmd) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto err; + } + if (do_checks) { + /* + * Check for an existing UNIT ATTENTION condition after + * target_handle_task_attr() has done SAM task attr + * checking, and possibly have already defered execution + * out to target_restart_delayed_cmds() context. + */ + ret = target_scsi3_ua_check(cmd); + if (ret) + goto err; + + ret = target_alua_state_check(cmd); + if (ret) + goto err; + + ret = target_check_reservation(cmd); + if (ret) { + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + goto err; + } + } + + ret = cmd->execute_cmd(cmd); + if (!ret) + return; +err: + spin_lock_irq(&cmd->t_state_lock); + cmd->transport_state &= ~CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + + transport_generic_request_failure(cmd, ret); +} + +static int target_write_prot_action(struct se_cmd *cmd) +{ + u32 sectors; + /* + * Perform WRITE_INSERT of PI using software emulation when backend + * device has PI enabled, if the transport has not already generated + * PI using hardware WRITE_INSERT offload. + */ + switch (cmd->prot_op) { + case TARGET_PROT_DOUT_INSERT: + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) + sbc_dif_generate(cmd); + break; + case TARGET_PROT_DOUT_STRIP: + if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) + break; + + sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); + cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, + sectors, 0, cmd->t_prot_sg, 0); + if (unlikely(cmd->pi_err)) { + spin_lock_irq(&cmd->t_state_lock); + cmd->transport_state &= ~CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + transport_generic_request_failure(cmd, cmd->pi_err); + return -1; + } + break; + default: + break; + } + + return 0; +} + +static bool target_handle_task_attr(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + return false; + + cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; + + /* + * Check for the existence of HEAD_OF_QUEUE, and if true return 1 + * to allow the passed struct se_cmd list of tasks to the front of the list. 
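[Annotation] The hardware/software split in target_write_prot_action() above (and in target_read_prot_action() further down) turns on se_sess->sup_prot_ops. As an illustration, a fabric whose hardware inserts T10 PI on writes and strips it on reads could advertise something like the following when the session is set up, so that the sbc_dif_generate()/sbc_dif_verify() software fallbacks are skipped (illustrative assignment; real drivers set this at session creation):

	se_sess->sup_prot_ops = TARGET_PROT_DOUT_INSERT | TARGET_PROT_DIN_STRIP;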
+ */
+ switch (cmd->sam_task_attr) {
+ case TCM_HEAD_TAG:
+ atomic_inc_mb(&dev->non_ordered);
+ pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
+ cmd->t_task_cdb[0]);
+ return false;
+ case TCM_ORDERED_TAG:
+ atomic_inc_mb(&dev->delayed_cmd_count);
+
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
+ cmd->t_task_cdb[0]);
+ break;
+ default:
+ /*
+ * For SIMPLE and UNTAGGED Task Attribute commands
+ */
+ atomic_inc_mb(&dev->non_ordered);
+
+ if (atomic_read(&dev->delayed_cmd_count) == 0)
+ return false;
+ break;
+ }
+
+ if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
+ atomic_inc_mb(&dev->delayed_cmd_count);
+ /*
+ * We will account for this when we dequeue from the delayed
+ * list.
+ */
+ atomic_dec_mb(&dev->non_ordered);
+ }
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ spin_lock(&dev->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+ spin_unlock(&dev->delayed_cmd_lock);
+
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
+ cmd->t_task_cdb[0], cmd->sam_task_attr);
+ /*
+ * We may have no non-ordered cmds when this function started or we
+ * could have raced with the last simple/head cmd completing, so kick
+ * the delayed handler here.
+ */
+ schedule_work(&dev->delayed_cmd_work);
+ return true;
+}
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+ *
+ * If the received CDB has already been aborted stop processing it here.
+ */
+ if (target_cmd_interrupted(cmd))
+ return;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ if (target_write_prot_action(cmd))
+ return;
+
+ if (target_handle_task_attr(cmd))
+ return;
+
+ __target_execute_cmd(cmd, true);
+}
+EXPORT_SYMBOL(target_execute_cmd);
+
+/*
+ * Process all commands up to the last received ORDERED task attribute which
+ * requires another blocking boundary
+ */
+void target_do_delayed_work(struct work_struct *work)
+{
+ struct se_device *dev = container_of(work, struct se_device,
+ delayed_cmd_work);
+
+ spin_lock(&dev->delayed_cmd_lock);
+ while (!dev->ordered_sync_in_progress) {
+ struct se_cmd *cmd;
+
+ if (list_empty(&dev->delayed_cmd_list))
+ break;
+
+ cmd = list_entry(dev->delayed_cmd_list.next,
+ struct se_cmd, se_delayed_node);
+
+ if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
+ /*
+ * Check if we started with:
+ * [ordered] [simple] [ordered]
+ * and we are now at the last ordered so we have to wait
+ * for the simple cmd.
+ */
+ if (atomic_read(&dev->non_ordered) > 0)
+ break;
+
+ dev->ordered_sync_in_progress = true;
+ }
+
+ list_del(&cmd->se_delayed_node);
+ atomic_dec_mb(&dev->delayed_cmd_count);
+ spin_unlock(&dev->delayed_cmd_lock);
+
+ if (cmd->sam_task_attr != TCM_ORDERED_TAG)
+ atomic_inc_mb(&dev->non_ordered);
+
+ cmd->transport_state |= CMD_T_SENT;
+
+ __target_execute_cmd(cmd, true);
+
+ spin_lock(&dev->delayed_cmd_lock);
+ }
+ spin_unlock(&dev->delayed_cmd_lock);
+}
+
+/*
+ * Called from I/O completion to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue. 
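[Annotation] A concrete walk-through of target_handle_task_attr() and target_do_delayed_work() above, with illustrative commands A, B and C arriving in that order:

	t0: SIMPLE A arrives   -> non_ordered++, A executes immediately
	t1: ORDERED B arrives  -> delayed_cmd_count++, B parks on the delayed list
	t2: SIMPLE C arrives   -> delayed_cmd_count > 0, so C parks as well
	t3: A completes        -> non_ordered drops to 0; the delayed worker
	                          runs B with ordered_sync_in_progress set
	t4: B completes        -> the ordered barrier clears and the worker
	                          is kicked again to run C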
+ */ +static void transport_complete_task_attr(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + return; + + if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) + goto restart; + + if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { + atomic_dec_mb(&dev->non_ordered); + dev->dev_cur_ordered_id++; + } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { + atomic_dec_mb(&dev->non_ordered); + dev->dev_cur_ordered_id++; + pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", + dev->dev_cur_ordered_id); + } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { + spin_lock(&dev->delayed_cmd_lock); + dev->ordered_sync_in_progress = false; + spin_unlock(&dev->delayed_cmd_lock); + + dev->dev_cur_ordered_id++; + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", + dev->dev_cur_ordered_id); + } + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; + +restart: + if (atomic_read(&dev->delayed_cmd_count) > 0) + schedule_work(&dev->delayed_cmd_work); +} + +static void transport_complete_qf(struct se_cmd *cmd) +{ + int ret = 0; + + transport_complete_task_attr(cmd); + /* + * If a fabric driver ->write_pending() or ->queue_data_in() callback + * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and + * the same callbacks should not be retried. Return CHECK_CONDITION + * if a scsi_status is not already set. + * + * If a fabric driver ->queue_status() has returned non zero, always + * keep retrying no matter what.. + */ + if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { + if (cmd->scsi_status) + goto queue_status; + + translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); + goto queue_status; + } + + /* + * Check if we need to send a sense buffer from + * the struct se_cmd in question. We do NOT want + * to take this path of the IO has been marked as + * needing to be treated like a "normal read". This + * is the case if it's a tape read, and either the + * FM, EOM, or ILI bits are set, but there is no + * sense data. + */ + if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && + cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + goto queue_status; + + switch (cmd->data_direction) { + case DMA_FROM_DEVICE: + /* queue status if not treating this as a normal read */ + if (cmd->scsi_status && + !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) + goto queue_status; + + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + break; + case DMA_TO_DEVICE: + if (cmd->se_cmd_flags & SCF_BIDI) { + ret = cmd->se_tfo->queue_data_in(cmd); + break; + } + fallthrough; + case DMA_NONE: +queue_status: + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); + break; + default: + break; + } + + if (ret < 0) { + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); + return; + } + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); +} + +static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, + int err, bool write_pending) +{ + /* + * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or + * ->queue_data_in() callbacks from new process context. + * + * Otherwise for other errors, transport_complete_qf() will send + * CHECK_CONDITION via ->queue_status() instead of attempting to + * retry associated fabric driver data-transfer callbacks. + */ + if (err == -EAGAIN || err == -ENOMEM) { + cmd->t_state = (write_pending) ? 
TRANSPORT_COMPLETE_QF_WP : + TRANSPORT_COMPLETE_QF_OK; + } else { + pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); + cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; + } + + spin_lock_irq(&dev->qf_cmd_lock); + list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); + atomic_inc_mb(&dev->dev_qf_count); + spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); + + schedule_work(&cmd->se_dev->qf_work_queue); +} + +static bool target_read_prot_action(struct se_cmd *cmd) +{ + switch (cmd->prot_op) { + case TARGET_PROT_DIN_STRIP: + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { + u32 sectors = cmd->data_length >> + ilog2(cmd->se_dev->dev_attrib.block_size); + + cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, + sectors, 0, cmd->t_prot_sg, + 0); + if (cmd->pi_err) + return true; + } + break; + case TARGET_PROT_DIN_INSERT: + if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) + break; + + sbc_dif_generate(cmd); + break; + default: + break; + } + + return false; +} + +static void target_complete_ok_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + int ret; + + /* + * Check if we need to move delayed/dormant tasks from cmds on the + * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task + * Attribute. + */ + transport_complete_task_attr(cmd); + + /* + * Check to schedule QUEUE_FULL work, or execute an existing + * cmd->transport_qf_callback() + */ + if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) + schedule_work(&cmd->se_dev->qf_work_queue); + + /* + * Check if we need to send a sense buffer from + * the struct se_cmd in question. We do NOT want + * to take this path of the IO has been marked as + * needing to be treated like a "normal read". This + * is the case if it's a tape read, and either the + * FM, EOM, or ILI bits are set, but there is no + * sense data. + */ + if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && + cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + WARN_ON(!cmd->scsi_status); + ret = transport_send_check_condition_and_sense( + cmd, 0, 1); + if (ret) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + /* + * Check for a callback, used by amongst other things + * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. + */ + if (cmd->transport_complete_callback) { + sense_reason_t rc; + bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); + bool zero_dl = !(cmd->data_length); + int post_ret = 0; + + rc = cmd->transport_complete_callback(cmd, true, &post_ret); + if (!rc && !post_ret) { + if (caw && zero_dl) + goto queue_rsp; + + return; + } else if (rc) { + ret = transport_send_check_condition_and_sense(cmd, + rc, 0); + if (ret) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + } + +queue_rsp: + switch (cmd->data_direction) { + case DMA_FROM_DEVICE: + /* + * if this is a READ-type IO, but SCSI status + * is set, then skip returning data and just + * return the status -- unless this IO is marked + * as needing to be treated as a normal read, + * in which case we want to go ahead and return + * the data. This happens, for example, for tape + * reads with the FM, EOM, or ILI bits set, with + * no sense data. 
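[Annotation] The -EAGAIN/-ENOMEM convention described above is what fabric callbacks rely on to get a retry. A minimal sketch of a ->queue_data_in() implementation that signals a transient full ring (both my_hw_* helpers are hypothetical):

static int my_fabric_queue_data_in(struct se_cmd *se_cmd)
{
	if (!my_hw_ring_has_space(se_cmd))
		return -EAGAIN;	/* retried later from target_qf_do_work() */

	my_hw_send_data(se_cmd);
	return 0;
}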
+ */ + if (cmd->scsi_status && + !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) + goto queue_status; + + atomic_long_add(cmd->data_length, + &cmd->se_lun->lun_stats.tx_data_octets); + /* + * Perform READ_STRIP of PI using software emulation when + * backend had PI enabled, if the transport will not be + * performing hardware READ_STRIP offload. + */ + if (target_read_prot_action(cmd)) { + ret = transport_send_check_condition_and_sense(cmd, + cmd->pi_err, 0); + if (ret) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret) + goto queue_full; + break; + case DMA_TO_DEVICE: + atomic_long_add(cmd->data_length, + &cmd->se_lun->lun_stats.rx_data_octets); + /* + * Check if we need to send READ payload for BIDI-COMMAND + */ + if (cmd->se_cmd_flags & SCF_BIDI) { + atomic_long_add(cmd->data_length, + &cmd->se_lun->lun_stats.tx_data_octets); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret) + goto queue_full; + break; + } + fallthrough; + case DMA_NONE: +queue_status: + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + goto queue_full; + break; + default: + break; + } + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + +queue_full: + pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," + " data_direction: %d\n", cmd, cmd->data_direction); + + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); +} + +void target_free_sgl(struct scatterlist *sgl, int nents) +{ + sgl_free_n_order(sgl, nents, 0); +} +EXPORT_SYMBOL(target_free_sgl); + +static inline void transport_reset_sgl_orig(struct se_cmd *cmd) +{ + /* + * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE + * emulation, and free + reset pointers if necessary.. + */ + if (!cmd->t_data_sg_orig) + return; + + kfree(cmd->t_data_sg); + cmd->t_data_sg = cmd->t_data_sg_orig; + cmd->t_data_sg_orig = NULL; + cmd->t_data_nents = cmd->t_data_nents_orig; + cmd->t_data_nents_orig = 0; +} + +static inline void transport_free_pages(struct se_cmd *cmd) +{ + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { + target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); + cmd->t_prot_sg = NULL; + cmd->t_prot_nents = 0; + } + + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + /* + * Release special case READ buffer payload required for + * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE + */ + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { + target_free_sgl(cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; + } + transport_reset_sgl_orig(cmd); + return; + } + transport_reset_sgl_orig(cmd); + + target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); + cmd->t_data_sg = NULL; + cmd->t_data_nents = 0; + + target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; +} + +void *transport_kmap_data_sg(struct se_cmd *cmd) +{ + struct scatterlist *sg = cmd->t_data_sg; + struct page **pages; + int i; + + /* + * We need to take into account a possible offset here for fabrics like + * tcm_loop who may be using a contig buffer from the SCSI midlayer for + * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() + */ + if (!cmd->t_data_nents) + return NULL; + + BUG_ON(!sg); + if (cmd->t_data_nents == 1) + return kmap(sg_page(sg)) + sg->offset; + + /* >1 page. 
use vmap */ + pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); + if (!pages) + return NULL; + + /* convert sg[] to pages[] */ + for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { + pages[i] = sg_page(sg); + } + + cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!cmd->t_data_vmap) + return NULL; + + return cmd->t_data_vmap + cmd->t_data_sg[0].offset; +} +EXPORT_SYMBOL(transport_kmap_data_sg); + +void transport_kunmap_data_sg(struct se_cmd *cmd) +{ + if (!cmd->t_data_nents) { + return; + } else if (cmd->t_data_nents == 1) { + kunmap(sg_page(cmd->t_data_sg)); + return; + } + + vunmap(cmd->t_data_vmap); + cmd->t_data_vmap = NULL; +} +EXPORT_SYMBOL(transport_kunmap_data_sg); + +int +target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, + bool zero_page, bool chainable) +{ + gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); + + *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); + return *sgl ? 0 : -ENOMEM; +} +EXPORT_SYMBOL(target_alloc_sgl); + +/* + * Allocate any required resources to execute the command. For writes we + * might not have the payload yet, so notify the fabric via a call to + * ->write_pending instead. Otherwise place it on the execution queue. + */ +sense_reason_t +transport_generic_new_cmd(struct se_cmd *cmd) +{ + unsigned long flags; + int ret = 0; + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); + + if (cmd->prot_op != TARGET_PROT_NORMAL && + !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { + ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, + cmd->prot_length, true, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + /* + * Determine if the TCM fabric module has already allocated physical + * memory, and is directly calling transport_generic_map_mem_to_cmd() + * beforehand. + */ + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && + cmd->data_length) { + + if ((cmd->se_cmd_flags & SCF_BIDI) || + (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { + u32 bidi_length; + + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) + bidi_length = cmd->t_task_nolb * + cmd->se_dev->dev_attrib.block_size; + else + bidi_length = cmd->data_length; + + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + &cmd->t_bidi_data_nents, + bidi_length, zero_flag, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, + cmd->data_length, zero_flag, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + cmd->data_length) { + /* + * Special case for COMPARE_AND_WRITE with fabrics + * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. + */ + u32 caw_length = cmd->t_task_nolb * + cmd->se_dev->dev_attrib.block_size; + + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + &cmd->t_bidi_data_nents, + caw_length, zero_flag, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + /* + * If this command is not a write we can execute it right here, + * for write buffers we need to notify the fabric driver first + * and let it call back once the write buffers are ready. 
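[Annotation] The mapping helpers above give backend emulation code a linear view of the data SGL. A minimal sketch of the usual pairing, assuming a hypothetical control-payload parser:

static sense_reason_t my_parse_ctl_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_data_sg(cmd);

	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* ... inspect up to cmd->data_length bytes of buf ... */

	transport_kunmap_data_sg(cmd);
	return TCM_NO_SENSE;
}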
+ */ + target_add_to_state_list(cmd); + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { + target_execute_cmd(cmd); + return 0; + } + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. + */ + if (cmd->transport_state & CMD_T_STOP && + !cmd->se_tfo->write_pending_must_be_called) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + complete_all(&cmd->t_transport_stop_comp); + return 0; + } + cmd->transport_state &= ~CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + ret = cmd->se_tfo->write_pending(cmd); + if (ret) + goto queue_full; + + return 0; + +queue_full: + pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); + return 0; +} +EXPORT_SYMBOL(transport_generic_new_cmd); + +static void transport_write_pending_qf(struct se_cmd *cmd) +{ + unsigned long flags; + int ret; + bool stop; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (stop) { + pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + complete_all(&cmd->t_transport_stop_comp); + return; + } + + ret = cmd->se_tfo->write_pending(cmd); + if (ret) { + pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", + cmd); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); + } +} + +static bool +__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, + unsigned long *flags); + +static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); +} + +/* + * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has + * finished. + */ +void target_put_cmd_and_wait(struct se_cmd *cmd) +{ + DECLARE_COMPLETION_ONSTACK(compl); + + WARN_ON_ONCE(cmd->abrt_compl); + cmd->abrt_compl = &compl; + target_put_sess_cmd(cmd); + wait_for_completion(&compl); +} + +/* + * This function is called by frontend drivers after processing of a command + * has finished. + * + * The protocol for ensuring that either the regular frontend command + * processing flow or target_handle_abort() code drops one reference is as + * follows: + * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause + * the frontend driver to call this function synchronously or asynchronously. + * That will cause one reference to be dropped. + * - During regular command processing the target core sets CMD_T_COMPLETE + * before invoking one of the .queue_*() functions. + * - The code that aborts commands skips commands and TMFs for which + * CMD_T_COMPLETE has been set. + * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for + * commands that will be aborted. + * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set + * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). + * - For aborted commands for which CMD_T_TAS has been set .queue_status() will + * be called and will drop a reference. 
+ * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() + * will be called. target_handle_abort() will drop the final reference. + */ +int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +{ + DECLARE_COMPLETION_ONSTACK(compl); + int ret = 0; + bool aborted = false, tas = false; + + if (wait_for_tasks) + target_wait_free_cmd(cmd, &aborted, &tas); + + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. + */ + if (cmd->state_active) + target_remove_from_state_list(cmd); + + if (cmd->se_lun) + transport_lun_remove_cmd(cmd); + } + if (aborted) + cmd->free_compl = &compl; + ret = target_put_sess_cmd(cmd); + if (aborted) { + pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); + wait_for_completion(&compl); + ret = 1; + } + return ret; +} +EXPORT_SYMBOL(transport_generic_free_cmd); + +/** + * target_get_sess_cmd - Verify the session is accepting cmds and take ref + * @se_cmd: command descriptor to add + * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() + */ +int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) +{ + int ret = 0; + + /* + * Add a second kref if the fabric caller is expecting to handle + * fabric acknowledgement that requires two target_put_sess_cmd() + * invocations before se_cmd descriptor release. + */ + if (ack_kref) { + kref_get(&se_cmd->cmd_kref); + se_cmd->se_cmd_flags |= SCF_ACK_KREF; + } + + /* + * Users like xcopy do not use counters since they never do a stop + * and wait. + */ + if (se_cmd->cmd_cnt) { + if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt)) + ret = -ESHUTDOWN; + } + if (ret && ack_kref) + target_put_sess_cmd(se_cmd); + + return ret; +} +EXPORT_SYMBOL(target_get_sess_cmd); + +static void target_free_cmd_mem(struct se_cmd *cmd) +{ + transport_free_pages(cmd); + + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + core_tmr_release_req(cmd->se_tmr_req); + if (cmd->t_task_cdb != cmd->__t_task_cdb) + kfree(cmd->t_task_cdb); +} + +static void target_release_cmd_kref(struct kref *kref) +{ + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); + struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt; + struct completion *free_compl = se_cmd->free_compl; + struct completion *abrt_compl = se_cmd->abrt_compl; + + target_free_cmd_mem(se_cmd); + se_cmd->se_tfo->release_cmd(se_cmd); + if (free_compl) + complete(free_compl); + if (abrt_compl) + complete(abrt_compl); + + if (cmd_cnt) + percpu_ref_put(&cmd_cnt->refcnt); +} + +/** + * target_put_sess_cmd - decrease the command reference count + * @se_cmd: command to drop a reference from + * + * Returns 1 if and only if this target_put_sess_cmd() call caused the + * refcount to drop to zero. Returns zero otherwise. 
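+ *
+ * A minimal sketch of the pairing, assuming the command was set up with
+ * target_get_sess_cmd(se_cmd, true) (i.e. SCF_ACK_KREF, two references):
+ *
+ *	target_put_sess_cmd(se_cmd);	// I/O completion path
+ *	target_put_sess_cmd(se_cmd);	// fabric acknowledgement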
+ */ +int target_put_sess_cmd(struct se_cmd *se_cmd) +{ + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); +} +EXPORT_SYMBOL(target_put_sess_cmd); + +static const char *data_dir_name(enum dma_data_direction d) +{ + switch (d) { + case DMA_BIDIRECTIONAL: return "BIDI"; + case DMA_TO_DEVICE: return "WRITE"; + case DMA_FROM_DEVICE: return "READ"; + case DMA_NONE: return "NONE"; + } + + return "(?)"; +} + +static const char *cmd_state_name(enum transport_state_table t) +{ + switch (t) { + case TRANSPORT_NO_STATE: return "NO_STATE"; + case TRANSPORT_NEW_CMD: return "NEW_CMD"; + case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; + case TRANSPORT_PROCESSING: return "PROCESSING"; + case TRANSPORT_COMPLETE: return "COMPLETE"; + case TRANSPORT_ISTATE_PROCESSING: + return "ISTATE_PROCESSING"; + case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; + case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; + case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; + } + + return "(?)"; +} + +static void target_append_str(char **str, const char *txt) +{ + char *prev = *str; + + *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : + kstrdup(txt, GFP_ATOMIC); + kfree(prev); +} + +/* + * Convert a transport state bitmask into a string. The caller is + * responsible for freeing the returned pointer. + */ +static char *target_ts_to_str(u32 ts) +{ + char *str = NULL; + + if (ts & CMD_T_ABORTED) + target_append_str(&str, "aborted"); + if (ts & CMD_T_ACTIVE) + target_append_str(&str, "active"); + if (ts & CMD_T_COMPLETE) + target_append_str(&str, "complete"); + if (ts & CMD_T_SENT) + target_append_str(&str, "sent"); + if (ts & CMD_T_STOP) + target_append_str(&str, "stop"); + if (ts & CMD_T_FABRIC_STOP) + target_append_str(&str, "fabric_stop"); + + return str; +} + +static const char *target_tmf_name(enum tcm_tmreq_table tmf) +{ + switch (tmf) { + case TMR_ABORT_TASK: return "ABORT_TASK"; + case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; + case TMR_CLEAR_ACA: return "CLEAR_ACA"; + case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; + case TMR_LUN_RESET: return "LUN_RESET"; + case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; + case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; + case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; + case TMR_UNKNOWN: break; + } + return "(?)"; +} + +void target_show_cmd(const char *pfx, struct se_cmd *cmd) +{ + char *ts_str = target_ts_to_str(cmd->transport_state); + const u8 *cdb = cmd->t_task_cdb; + struct se_tmr_req *tmf = cmd->se_tmr_req; + + if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", + pfx, cdb[0], cdb[1], cmd->tag, + data_dir_name(cmd->data_direction), + cmd->se_tfo->get_cmd_state(cmd), + cmd_state_name(cmd->t_state), cmd->data_length, + kref_read(&cmd->cmd_kref), ts_str); + } else { + pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", + pfx, target_tmf_name(tmf->function), cmd->tag, + tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), + cmd_state_name(cmd->t_state), + kref_read(&cmd->cmd_kref), ts_str); + } + kfree(ts_str); +} +EXPORT_SYMBOL(target_show_cmd); + +static void target_stop_cmd_counter_confirm(struct percpu_ref *ref) +{ + struct target_cmd_counter *cmd_cnt = container_of(ref, + struct target_cmd_counter, + refcnt); + complete_all(&cmd_cnt->stop_done); +} + +/** + * target_stop_cmd_counter - Stop new IO from being added to the counter. 
+ * @cmd_cnt: counter to stop + */ +void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt) +{ + pr_debug("Stopping command counter.\n"); + if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1)) + percpu_ref_kill_and_confirm(&cmd_cnt->refcnt, + target_stop_cmd_counter_confirm); +} +EXPORT_SYMBOL_GPL(target_stop_cmd_counter); + +/** + * target_stop_session - Stop new IO from being queued on the session. + * @se_sess: session to stop + */ +void target_stop_session(struct se_session *se_sess) +{ + target_stop_cmd_counter(se_sess->cmd_cnt); +} +EXPORT_SYMBOL(target_stop_session); + +/** + * target_wait_for_cmds - Wait for outstanding cmds. + * @cmd_cnt: counter to wait for active I/O for. + */ +void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt) +{ + int ret; + + WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped)); + + do { + pr_debug("Waiting for running cmds to complete.\n"); + ret = wait_event_timeout(cmd_cnt->refcnt_wq, + percpu_ref_is_zero(&cmd_cnt->refcnt), + 180 * HZ); + } while (ret <= 0); + + wait_for_completion(&cmd_cnt->stop_done); + pr_debug("Waiting for cmds done.\n"); +} +EXPORT_SYMBOL_GPL(target_wait_for_cmds); + +/** + * target_wait_for_sess_cmds - Wait for outstanding commands + * @se_sess: session to wait for active I/O + */ +void target_wait_for_sess_cmds(struct se_session *se_sess) +{ + target_wait_for_cmds(se_sess->cmd_cnt); +} +EXPORT_SYMBOL(target_wait_for_sess_cmds); + +/* + * Prevent that new percpu_ref_tryget_live() calls succeed and wait until + * all references to the LUN have been released. Called during LUN shutdown. + */ +void transport_clear_lun_ref(struct se_lun *lun) +{ + percpu_ref_kill(&lun->lun_ref); + wait_for_completion(&lun->lun_shutdown_comp); +} + +static bool +__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, + bool *aborted, bool *tas, unsigned long *flags) + __releases(&cmd->t_state_lock) + __acquires(&cmd->t_state_lock) +{ + lockdep_assert_held(&cmd->t_state_lock); + + if (fabric_stop) + cmd->transport_state |= CMD_T_FABRIC_STOP; + + if (cmd->transport_state & CMD_T_ABORTED) + *aborted = true; + + if (cmd->transport_state & CMD_T_TAS) + *tas = true; + + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; + + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) + return false; + + if (!(cmd->transport_state & CMD_T_ACTIVE)) + return false; + + if (fabric_stop && *aborted) + return false; + + cmd->transport_state |= CMD_T_STOP; + + target_show_cmd("wait_for_tasks: Stopping ", cmd); + + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, + 180 * HZ)) + target_show_cmd("wait for tasks: ", cmd); + + spin_lock_irqsave(&cmd->t_state_lock, *flags); + cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); + + pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" + "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); + + return true; +} + +/** + * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp + * @cmd: command to wait on + */ +bool transport_wait_for_tasks(struct se_cmd *cmd) +{ + unsigned long flags; + bool ret, aborted = false, tas = false; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + return ret; +} +EXPORT_SYMBOL(transport_wait_for_tasks); + +struct sense_detail { + u8 key; + u8 asc; + u8 ascq; + bool 
add_sense_info; +}; + +static const struct sense_detail sense_detail_table[] = { + [TCM_NO_SENSE] = { + .key = NOT_READY + }, + [TCM_NON_EXISTENT_LUN] = { + .key = ILLEGAL_REQUEST, + .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ + }, + [TCM_UNSUPPORTED_SCSI_OPCODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ + }, + [TCM_SECTOR_COUNT_TOO_MANY] = { + .key = ILLEGAL_REQUEST, + .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ + }, + [TCM_UNKNOWN_MODE_PAGE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x24, /* INVALID FIELD IN CDB */ + }, + [TCM_CHECK_CONDITION_ABORT_CMD] = { + .key = ABORTED_COMMAND, + .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ + .ascq = 0x03, + }, + [TCM_INCORRECT_AMOUNT_OF_DATA] = { + .key = ABORTED_COMMAND, + .asc = 0x0c, /* WRITE ERROR */ + .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ + }, + [TCM_INVALID_CDB_FIELD] = { + .key = ILLEGAL_REQUEST, + .asc = 0x24, /* INVALID FIELD IN CDB */ + }, + [TCM_INVALID_PARAMETER_LIST] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ + }, + [TCM_TOO_MANY_TARGET_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ + }, + [TCM_TOO_MANY_SEGMENT_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ + }, + [TCM_PARAMETER_LIST_LENGTH_ERROR] = { + .key = ILLEGAL_REQUEST, + .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ + }, + [TCM_UNEXPECTED_UNSOLICITED_DATA] = { + .key = ILLEGAL_REQUEST, + .asc = 0x0c, /* WRITE ERROR */ + .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ + }, + [TCM_SERVICE_CRC_ERROR] = { + .key = ABORTED_COMMAND, + .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ + .ascq = 0x05, /* N/A */ + }, + [TCM_SNACK_REJECTED] = { + .key = ABORTED_COMMAND, + .asc = 0x11, /* READ ERROR */ + .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ + }, + [TCM_WRITE_PROTECTED] = { + .key = DATA_PROTECT, + .asc = 0x27, /* WRITE PROTECTED */ + }, + [TCM_ADDRESS_OUT_OF_RANGE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ + }, + [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { + .key = UNIT_ATTENTION, + }, + [TCM_MISCOMPARE_VERIFY] = { + .key = MISCOMPARE, + .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ + .ascq = 0x00, + .add_sense_info = true, + }, + [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { + .key = ABORTED_COMMAND, + .asc = 0x10, + .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ + .add_sense_info = true, + }, + [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { + .key = ABORTED_COMMAND, + .asc = 0x10, + .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ + .add_sense_info = true, + }, + [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { + .key = ABORTED_COMMAND, + .asc = 0x10, + .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ + .add_sense_info = true, + }, + [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { + .key = COPY_ABORTED, + .asc = 0x0d, + .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ + + }, + [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { + /* + * Returning ILLEGAL REQUEST would cause immediate IO errors on + * Solaris initiators. 
Returning NOT READY instead means the
+		 * operations will be retried a finite number of times and we
+		 * can survive intermittent errors.
+		 */
+		.key = NOT_READY,
+		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
+	},
+	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+		/*
+		 * From spc4r22, sections 5.7.7 and 5.7.8:
+		 * If a PERSISTENT RESERVE OUT command with a REGISTER service
+		 * action or a REGISTER AND IGNORE EXISTING KEY service action
+		 * or a REGISTER AND MOVE service action is attempted, but
+		 * there are insufficient device server resources to complete
+		 * the operation, then the command shall be terminated with
+		 * CHECK CONDITION status, with the sense key set to ILLEGAL
+		 * REQUEST, and the additional sense code set to INSUFFICIENT
+		 * REGISTRATION RESOURCES.
+		 */
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x55,
+		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+	},
+	[TCM_INVALID_FIELD_IN_COMMAND_IU] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x0e,
+		.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
+	},
+	[TCM_ALUA_TG_PT_STANDBY] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+	},
+	[TCM_ALUA_TG_PT_UNAVAILABLE] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+	},
+	[TCM_ALUA_STATE_TRANSITION] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+	},
+	[TCM_ALUA_OFFLINE] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_OFFLINE,
+	},
+};
+
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ *	 be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
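+ *
+ * Worked example from the table above: TCM_ADDRESS_OUT_OF_RANGE produces
+ * CHECK CONDITION with key/asc/ascq = ILLEGAL_REQUEST (0x05), 0x21, 0x00
+ * (LOGICAL BLOCK ADDRESS OUT OF RANGE).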
+ */ +static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) +{ + const struct sense_detail *sd; + u8 *buffer = cmd->sense_buffer; + int r = (__force int)reason; + u8 key, asc, ascq; + bool desc_format = target_sense_desc_format(cmd->se_dev); + + if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) + sd = &sense_detail_table[r]; + else + sd = &sense_detail_table[(__force int) + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; + + key = sd->key; + if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { + if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, + &ascq)) { + cmd->scsi_status = SAM_STAT_BUSY; + return; + } + } else { + WARN_ON_ONCE(sd->asc == 0); + asc = sd->asc; + ascq = sd->ascq; + } + + cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; + cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; + scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); + if (sd->add_sense_info) + WARN_ON_ONCE(scsi_set_sense_information(buffer, + cmd->scsi_sense_length, + cmd->sense_info) < 0); +} + +int +transport_send_check_condition_and_sense(struct se_cmd *cmd, + sense_reason_t reason, int from_transport) +{ + unsigned long flags; + + WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return 0; + } + cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (!from_transport) + translate_sense_reason(cmd, reason); + + trace_target_cmd_complete(cmd); + return cmd->se_tfo->queue_status(cmd); +} +EXPORT_SYMBOL(transport_send_check_condition_and_sense); + +/** + * target_send_busy - Send SCSI BUSY status back to the initiator + * @cmd: SCSI command for which to send a BUSY reply. + * + * Note: Only call this function if target_submit_cmd*() failed. + */ +int target_send_busy(struct se_cmd *cmd) +{ + WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); + + cmd->scsi_status = SAM_STAT_BUSY; + trace_target_cmd_complete(cmd); + return cmd->se_tfo->queue_status(cmd); +} +EXPORT_SYMBOL(target_send_busy); + +static void target_tmr_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + struct se_device *dev = cmd->se_dev; + struct se_tmr_req *tmr = cmd->se_tmr_req; + int ret; + + if (cmd->transport_state & CMD_T_ABORTED) + goto aborted; + + switch (tmr->function) { + case TMR_ABORT_TASK: + core_tmr_abort_task(dev, tmr, cmd->se_sess); + break; + case TMR_ABORT_TASK_SET: + case TMR_CLEAR_ACA: + case TMR_CLEAR_TASK_SET: + tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; + break; + case TMR_LUN_RESET: + ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); + tmr->response = (!ret) ? 
TMR_FUNCTION_COMPLETE : + TMR_FUNCTION_REJECTED; + if (tmr->response == TMR_FUNCTION_COMPLETE) { + target_dev_ua_allocate(dev, 0x29, + ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); + } + break; + case TMR_TARGET_WARM_RESET: + tmr->response = TMR_FUNCTION_REJECTED; + break; + case TMR_TARGET_COLD_RESET: + tmr->response = TMR_FUNCTION_REJECTED; + break; + default: + pr_err("Unknown TMR function: 0x%02x.\n", + tmr->function); + tmr->response = TMR_FUNCTION_REJECTED; + break; + } + + if (cmd->transport_state & CMD_T_ABORTED) + goto aborted; + + cmd->se_tfo->queue_tm_rsp(cmd); + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + +aborted: + target_handle_abort(cmd); +} + +int transport_generic_handle_tmr( + struct se_cmd *cmd) +{ + unsigned long flags; + bool aborted = false; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->transport_state & CMD_T_ABORTED) { + aborted = true; + } else { + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; + cmd->transport_state |= CMD_T_ACTIVE; + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (aborted) { + pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", + cmd->se_tmr_req->function, + cmd->se_tmr_req->ref_task_tag, cmd->tag); + target_handle_abort(cmd); + return 0; + } + + INIT_WORK(&cmd->work, target_tmr_work); + schedule_work(&cmd->work); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_tmr); + +bool +target_check_wce(struct se_device *dev) +{ + bool wce = false; + + if (dev->transport->get_write_cache) + wce = dev->transport->get_write_cache(dev); + else if (dev->dev_attrib.emulate_write_cache > 0) + wce = true; + + return wce; +} + +bool +target_check_fua(struct se_device *dev) +{ + return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; +} diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c new file mode 100644 index 0000000000..4276690fb6 --- /dev/null +++ b/drivers/target/target_core_ua.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_ua.c + * + * This file contains logic for SPC-3 Unit Attention emulation + * + * (c) Copyright 2009-2013 Datera, Inc. + * + * Nicholas A. 
Bellinger <nab@kernel.org>
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi_proto.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+
+	if (!sess)
+		return 0;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return 0;
+
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return 0;
+	}
+	if (list_empty_careful(&deve->ua_list)) {
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+	/*
+	 * From sam4r14, section 5.14 Unit attention condition:
+	 *
+	 * a) if an INQUIRY command enters the enabled command state, the
+	 *    device server shall process the INQUIRY command and shall neither
+	 *    report nor clear any unit attention condition;
+	 * b) if a REPORT LUNS command enters the enabled command state, the
+	 *    device server shall process the REPORT LUNS command and shall not
+	 *    report any unit attention condition;
+	 * e) if a REQUEST SENSE command enters the enabled command state while
+	 *    a unit attention condition exists for the SCSI initiator port
+	 *    associated with the I_T nexus on which the REQUEST SENSE command
+	 *    was received, then the device server shall process the command
+	 *    and either:
+	 */
+	switch (cmd->t_task_cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		return 0;
+	default:
+		return TCM_CHECK_CONDITION_UNIT_ATTENTION;
+	}
+}
+
+int core_scsi3_ua_allocate(
+	struct se_dev_entry *deve,
+	u8 asc,
+	u8 ascq)
+{
+	struct se_ua *ua, *ua_p, *ua_tmp;
+
+	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+	if (!ua) {
+		pr_err("Unable to allocate struct se_ua\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+	ua->ua_asc = asc;
+	ua->ua_ascq = ascq;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * Do not report the same UNIT ATTENTION twice.
+		 */
+		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+			spin_unlock(&deve->ua_lock);
+			kmem_cache_free(se_ua_cache, ua);
+			return 0;
+		}
+		/*
+		 * Attach the highest priority Unit Attention to
+		 * the head of the list following sam4r14,
+		 * Section 5.14 Unit Attention Condition:
+		 *
+		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+		 * POWER ON OCCURRED or
+		 * DEVICE INTERNAL RESET
+		 * SCSI BUS RESET OCCURRED or
+		 * MICROCODE HAS BEEN CHANGED or
+		 * protocol specific
+		 * BUS DEVICE RESET FUNCTION OCCURRED
+		 * I_T NEXUS LOSS OCCURRED
+		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+		 * all others Lowest
+		 *
+		 * Each of the ASCQ codes listed above is defined in
+		 * the 29h ASC family, see spc4r17 Table D.1
+		 */
+		if (ua_p->ua_asc == 0x29) {
+			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+					 &deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+					      &deve->ua_list);
+		} else if (ua_p->ua_asc == 0x2a) {
+			/*
+			 * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for Unit Attention condition.
+ */ + if ((asc == 0x29) || (ascq > ua_p->ua_asc)) + list_add(&ua->ua_nacl_list, + &deve->ua_list); + else + list_add_tail(&ua->ua_nacl_list, + &deve->ua_list); + } else + list_add_tail(&ua->ua_nacl_list, + &deve->ua_list); + spin_unlock(&deve->ua_lock); + + return 0; + } + list_add_tail(&ua->ua_nacl_list, &deve->ua_list); + spin_unlock(&deve->ua_lock); + + pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:" + " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun, + asc, ascq); + + return 0; +} + +void target_ua_allocate_lun(struct se_node_acl *nacl, + u32 unpacked_lun, u8 asc, u8 ascq) +{ + struct se_dev_entry *deve; + + if (!nacl) + return; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, unpacked_lun); + if (!deve) { + rcu_read_unlock(); + return; + } + + core_scsi3_ua_allocate(deve, asc, ascq); + rcu_read_unlock(); +} + +void core_scsi3_ua_release_all( + struct se_dev_entry *deve) +{ + struct se_ua *ua, *ua_p; + + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { + list_del(&ua->ua_nacl_list); + kmem_cache_free(se_ua_cache, ua); + } + spin_unlock(&deve->ua_lock); +} + +/* + * Dequeue a unit attention from the unit attention list. This function + * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have + * been set. + */ +bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc, + u8 *ascq) +{ + struct se_device *dev = cmd->se_dev; + struct se_dev_entry *deve; + struct se_session *sess = cmd->se_sess; + struct se_node_acl *nacl; + struct se_ua *ua = NULL, *ua_p; + int head = 1; + bool dev_ua_intlck_clear = (dev->dev_attrib.emulate_ua_intlck_ctrl + == TARGET_UA_INTLCK_CTRL_CLEAR); + + if (WARN_ON_ONCE(!sess)) + return false; + + nacl = sess->se_node_acl; + if (WARN_ON_ONCE(!nacl)) + return false; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); + if (!deve) { + rcu_read_unlock(); + *key = ILLEGAL_REQUEST; + *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */ + *ascq = 0; + return true; + } + *key = UNIT_ATTENTION; + /* + * The highest priority Unit Attentions are placed at the head of the + * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION + + * sense data for the received CDB. + */ + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { + /* + * For ua_intlck_ctrl code not equal to 00b, only report the + * highest priority UNIT_ATTENTION and ASC/ASCQ without + * clearing it. + */ + if (!dev_ua_intlck_clear) { + *asc = ua->ua_asc; + *ascq = ua->ua_ascq; + break; + } + /* + * Otherwise for the default 00b, release the UNIT ATTENTION + * condition. Return the ASC/ASCQ of the highest priority UA + * (head of the list) in the outgoing CHECK_CONDITION + sense. + */ + if (head) { + *asc = ua->ua_asc; + *ascq = ua->ua_ascq; + head = 0; + } + list_del(&ua->ua_nacl_list); + kmem_cache_free(se_ua_cache, ua); + } + spin_unlock(&deve->ua_lock); + rcu_read_unlock(); + + pr_debug("[%s]: %s UNIT ATTENTION condition with" + " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x" + " reported ASC: 0x%02x, ASCQ: 0x%02x\n", + nacl->se_tpg->se_tpg_tfo->fabric_name, + dev_ua_intlck_clear ? 
"Releasing" : "Reporting", + dev->dev_attrib.emulate_ua_intlck_ctrl, + cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); + + return head == 0; +} + +int core_scsi3_ua_clear_for_request_sense( + struct se_cmd *cmd, + u8 *asc, + u8 *ascq) +{ + struct se_dev_entry *deve; + struct se_session *sess = cmd->se_sess; + struct se_node_acl *nacl; + struct se_ua *ua = NULL, *ua_p; + int head = 1; + + if (!sess) + return -EINVAL; + + nacl = sess->se_node_acl; + if (!nacl) + return -EINVAL; + + rcu_read_lock(); + deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); + if (!deve) { + rcu_read_unlock(); + return -EINVAL; + } + if (list_empty_careful(&deve->ua_list)) { + rcu_read_unlock(); + return -EPERM; + } + /* + * The highest priority Unit Attentions are placed at the head of the + * struct se_dev_entry->ua_list. The First (and hence highest priority) + * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the + * matching struct se_lun. + * + * Once the returning ASC/ASCQ values are set, we go ahead and + * release all of the Unit Attention conditions for the associated + * struct se_lun. + */ + spin_lock(&deve->ua_lock); + list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { + if (head) { + *asc = ua->ua_asc; + *ascq = ua->ua_ascq; + head = 0; + } + list_del(&ua->ua_nacl_list); + kmem_cache_free(se_ua_cache, ua); + } + spin_unlock(&deve->ua_lock); + rcu_read_unlock(); + + pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" + " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x," + " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name, + cmd->orig_fe_lun, *asc, *ascq); + + return (head) ? -EPERM : 0; +} diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h new file mode 100644 index 0000000000..76487c9be0 --- /dev/null +++ b/drivers/target/target_core_ua.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef TARGET_CORE_UA_H +#define TARGET_CORE_UA_H + +#include <target/target_core_base.h> + +/* + * From spc4r17, Table D.1: ASC and ASCQ Assignement + */ +#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00 +#define ASCQ_29H_POWER_ON_OCCURRED 0x01 +#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02 +#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03 +#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04 +#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05 +#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06 +#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07 + +#define ASCQ_2AH_PARAMETERS_CHANGED 0x00 +#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01 +#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02 +#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03 +#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 +#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 +#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 +#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 +#define ASCQ_2AH_PRIORITY_CHANGED 0x08 + +#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 + +#define ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED 0x03 +#define ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED 0x0E + +extern struct kmem_cache *se_ua_cache; + +extern sense_reason_t target_scsi3_ua_check(struct se_cmd *); +extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8); +extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8); +extern void core_scsi3_ua_release_all(struct se_dev_entry *); +extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *, + u8 *); +extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, + u8 *, u8 *); + 
+#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
new file mode 100644
index 0000000000..22cc6cac0b
--- /dev/null
+++ b/drivers/target/target_core_user.c
@@ -0,0 +1,3393 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
+ * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2015 Arrikto, Inc.
+ * Copyright (C) 2017 Chinamobile, Inc.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/parser.h>
+#include <linux/vmalloc.h>
+#include <linux/uio_driver.h>
+#include <linux/xarray.h>
+#include <linux/stringify.h>
+#include <linux/bitops.h>
+#include <linux/highmem.h>
+#include <linux/configfs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/pagemap.h>
+#include <net/genetlink.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_backend.h>
+
+#include <linux/target_core_user.h>
+
+/**
+ * DOC: Userspace I/O
+ * Userspace I/O
+ * -------------
+ *
+ * Define a shared-memory interface for LIO to pass SCSI commands and
+ * data to userspace for processing. This allows backends that are too
+ * complex for in-kernel support to be implemented in userspace.
+ *
+ * It uses the UIO framework to do a lot of the device-creation and
+ * introspection work for us.
+ *
+ * See the .h file for how the ring is laid out. Note that while the
+ * command ring is defined, the particulars of the data area are
+ * not. Offset values in the command entry point to other locations
+ * internal to the mmap-ed area. There is separate space outside the
+ * command ring for data buffers. This leaves maximum flexibility for
+ * moving buffer allocations, or even page flipping or other
+ * allocation techniques, without altering the command ring layout.
+ *
+ * SECURITY:
+ * The user process must be assumed to be malicious. There's no way to
+ * prevent it breaking the command ring protocol if it wants, but in
+ * order to prevent other issues we must only ever read *data* from
+ * the shared memory area, not offsets or sizes. This applies to
+ * command ring entries as well as the mailbox. Extra code needed for
+ * this may have a 'UAM' comment.
+ */
+
+#define TCMU_TIME_OUT	(30 * MSEC_PER_SEC)
+
+/* For mailbox plus cmd ring, the size is a fixed 8MB */
+#define MB_CMDR_SIZE_DEF	(8 * 1024 * 1024)
+/* Offset of cmd ring is size of mailbox */
+#define CMDR_OFF	((__u32)sizeof(struct tcmu_mailbox))
+#define CMDR_SIZE_DEF	(MB_CMDR_SIZE_DEF - CMDR_OFF)
+
+/*
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
+ */
+#define DATA_PAGES_PER_BLK_DEF	1
+#define DATA_AREA_PAGES_DEF	(256 * 1024)
+
+#define TCMU_MBS_TO_PAGES(_mbs)	((size_t)_mbs << (20 - PAGE_SHIFT))
+#define TCMU_PAGES_TO_MBS(_pages)	(_pages >> (20 - PAGE_SHIFT))
+
+/*
+ * Default number of global data blocks (512K * PAGE_SIZE) at which
+ * the unmap thread will be started.
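+ *
+ * As a worked example with 4 KiB pages (PAGE_SHIFT == 12):
+ * TCMU_PAGES_TO_MBS(512 * 1024) == (512 * 1024) >> 8 == 2048 MB,
+ * i.e. 2 GiB of data pages.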
+ */ +#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024) + +static u8 tcmu_kern_cmd_reply_supported; +static u8 tcmu_netlink_blocked; + +static struct device *tcmu_root_device; + +struct tcmu_hba { + u32 host_id; +}; + +#define TCMU_CONFIG_LEN 256 + +static DEFINE_MUTEX(tcmu_nl_cmd_mutex); +static LIST_HEAD(tcmu_nl_cmd_list); + +struct tcmu_dev; + +struct tcmu_nl_cmd { + /* wake up thread waiting for reply */ + struct completion complete; + struct list_head nl_list; + struct tcmu_dev *udev; + int cmd; + int status; +}; + +struct tcmu_dev { + struct list_head node; + struct kref kref; + + struct se_device se_dev; + struct se_dev_plug se_plug; + + char *name; + struct se_hba *hba; + +#define TCMU_DEV_BIT_OPEN 0 +#define TCMU_DEV_BIT_BROKEN 1 +#define TCMU_DEV_BIT_BLOCKED 2 +#define TCMU_DEV_BIT_TMR_NOTIFY 3 +#define TCMU_DEV_BIT_PLUGGED 4 + unsigned long flags; + + struct uio_info uio_info; + + struct inode *inode; + + uint64_t dev_size; + + struct tcmu_mailbox *mb_addr; + void *cmdr; + u32 cmdr_size; + u32 cmdr_last_cleaned; + /* Offset of data area from start of mb */ + /* Must add data_off and mb_addr to get the address */ + size_t data_off; + int data_area_mb; + uint32_t max_blocks; + size_t mmap_pages; + + struct mutex cmdr_lock; + struct list_head qfull_queue; + struct list_head tmr_queue; + + uint32_t dbi_max; + uint32_t dbi_thresh; + unsigned long *data_bitmap; + struct xarray data_pages; + uint32_t data_pages_per_blk; + uint32_t data_blk_size; + + struct xarray commands; + + struct timer_list cmd_timer; + unsigned int cmd_time_out; + struct list_head inflight_queue; + + struct timer_list qfull_timer; + int qfull_time_out; + + struct list_head timedout_entry; + + struct tcmu_nl_cmd curr_nl_cmd; + + char dev_config[TCMU_CONFIG_LEN]; + + int nl_reply_supported; +}; + +#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) + +struct tcmu_cmd { + struct se_cmd *se_cmd; + struct tcmu_dev *tcmu_dev; + struct list_head queue_entry; + + uint16_t cmd_id; + + /* Can't use se_cmd when cleaning up expired cmds, because if + cmd has been completed then accessing se_cmd is off limits */ + uint32_t dbi_cnt; + uint32_t dbi_bidi_cnt; + uint32_t dbi_cur; + uint32_t *dbi; + + uint32_t data_len_bidi; + + unsigned long deadline; + +#define TCMU_CMD_BIT_EXPIRED 0 +#define TCMU_CMD_BIT_KEEP_BUF 1 + unsigned long flags; +}; + +struct tcmu_tmr { + struct list_head queue_entry; + + uint8_t tmr_type; + uint32_t tmr_cmd_cnt; + int16_t tmr_cmd_ids[]; +}; + +/* + * To avoid dead lock the mutex lock order should always be: + * + * mutex_lock(&root_udev_mutex); + * ... + * mutex_lock(&tcmu_dev->cmdr_lock); + * mutex_unlock(&tcmu_dev->cmdr_lock); + * ... 
+ * mutex_unlock(&root_udev_mutex); + */ +static DEFINE_MUTEX(root_udev_mutex); +static LIST_HEAD(root_udev); + +static DEFINE_SPINLOCK(timed_out_udevs_lock); +static LIST_HEAD(timed_out_udevs); + +static struct kmem_cache *tcmu_cmd_cache; + +static atomic_t global_page_count = ATOMIC_INIT(0); +static struct delayed_work tcmu_unmap_work; +static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF; + +static int tcmu_set_global_max_data_area(const char *str, + const struct kernel_param *kp) +{ + int ret, max_area_mb; + + ret = kstrtoint(str, 10, &max_area_mb); + if (ret) + return -EINVAL; + + if (max_area_mb <= 0) { + pr_err("global_max_data_area must be larger than 0.\n"); + return -EINVAL; + } + + tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb); + if (atomic_read(&global_page_count) > tcmu_global_max_pages) + schedule_delayed_work(&tcmu_unmap_work, 0); + else + cancel_delayed_work_sync(&tcmu_unmap_work); + + return 0; +} + +static int tcmu_get_global_max_data_area(char *buffer, + const struct kernel_param *kp) +{ + return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages)); +} + +static const struct kernel_param_ops tcmu_global_max_data_area_op = { + .set = tcmu_set_global_max_data_area, + .get = tcmu_get_global_max_data_area, +}; + +module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, + S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(global_max_data_area_mb, + "Max MBs allowed to be allocated to all the tcmu device's " + "data areas."); + +static int tcmu_get_block_netlink(char *buffer, + const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", tcmu_netlink_blocked ? + "blocked" : "unblocked"); +} + +static int tcmu_set_block_netlink(const char *str, + const struct kernel_param *kp) +{ + int ret; + u8 val; + + ret = kstrtou8(str, 0, &val); + if (ret < 0) + return ret; + + if (val > 1) { + pr_err("Invalid block netlink value %u\n", val); + return -EINVAL; + } + + tcmu_netlink_blocked = val; + return 0; +} + +static const struct kernel_param_ops tcmu_block_netlink_op = { + .set = tcmu_set_block_netlink, + .get = tcmu_get_block_netlink, +}; + +module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(block_netlink, "Block new netlink commands."); + +static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd) +{ + struct tcmu_dev *udev = nl_cmd->udev; + + if (!tcmu_netlink_blocked) { + pr_err("Could not reset device's netlink interface. 
Netlink is not blocked.\n"); + return -EBUSY; + } + + if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { + pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); + nl_cmd->status = -EINTR; + list_del(&nl_cmd->nl_list); + complete(&nl_cmd->complete); + } + return 0; +} + +static int tcmu_set_reset_netlink(const char *str, + const struct kernel_param *kp) +{ + struct tcmu_nl_cmd *nl_cmd, *tmp_cmd; + int ret; + u8 val; + + ret = kstrtou8(str, 0, &val); + if (ret < 0) + return ret; + + if (val != 1) { + pr_err("Invalid reset netlink value %u\n", val); + return -EINVAL; + } + + mutex_lock(&tcmu_nl_cmd_mutex); + list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) { + ret = tcmu_fail_netlink_cmd(nl_cmd); + if (ret) + break; + } + mutex_unlock(&tcmu_nl_cmd_mutex); + + return ret; +} + +static const struct kernel_param_ops tcmu_reset_netlink_op = { + .set = tcmu_set_reset_netlink, +}; + +module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR); +MODULE_PARM_DESC(reset_netlink, "Reset netlink commands."); + +/* multicast group */ +enum tcmu_multicast_groups { + TCMU_MCGRP_CONFIG, +}; + +static const struct genl_multicast_group tcmu_mcgrps[] = { + [TCMU_MCGRP_CONFIG] = { .name = "config", }, +}; + +static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { + [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, + [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, + [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, + [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, + [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, +}; + +static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) +{ + struct tcmu_dev *udev = NULL; + struct tcmu_nl_cmd *nl_cmd; + int dev_id, rc, ret = 0; + + if (!info->attrs[TCMU_ATTR_CMD_STATUS] || + !info->attrs[TCMU_ATTR_DEVICE_ID]) { + printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); + return -EINVAL; + } + + dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); + rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); + + mutex_lock(&tcmu_nl_cmd_mutex); + list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) { + if (nl_cmd->udev->se_dev.dev_index == dev_id) { + udev = nl_cmd->udev; + break; + } + } + + if (!udev) { + pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n", + completed_cmd, rc, dev_id); + ret = -ENODEV; + goto unlock; + } + list_del(&nl_cmd->nl_list); + + pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n", + udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, + nl_cmd->status); + + if (nl_cmd->cmd != completed_cmd) { + pr_err("Mismatched commands on %s (Expecting reply for %d. 
Current %d).\n", + udev->name, completed_cmd, nl_cmd->cmd); + ret = -EINVAL; + goto unlock; + } + + nl_cmd->status = rc; + complete(&nl_cmd->complete); +unlock: + mutex_unlock(&tcmu_nl_cmd_mutex); + return ret; +} + +static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) +{ + return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); +} + +static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) +{ + return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); +} + +static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, + struct genl_info *info) +{ + return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); +} + +static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) +{ + if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { + tcmu_kern_cmd_reply_supported = + nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); + printk(KERN_INFO "tcmu daemon: command reply support %u.\n", + tcmu_kern_cmd_reply_supported); + } + + return 0; +} + +static const struct genl_small_ops tcmu_genl_ops[] = { + { + .cmd = TCMU_CMD_SET_FEATURES, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .flags = GENL_ADMIN_PERM, + .doit = tcmu_genl_set_features, + }, + { + .cmd = TCMU_CMD_ADDED_DEVICE_DONE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .flags = GENL_ADMIN_PERM, + .doit = tcmu_genl_add_dev_done, + }, + { + .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .flags = GENL_ADMIN_PERM, + .doit = tcmu_genl_rm_dev_done, + }, + { + .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .flags = GENL_ADMIN_PERM, + .doit = tcmu_genl_reconfig_dev_done, + }, +}; + +/* Our generic netlink family */ +static struct genl_family tcmu_genl_family __ro_after_init = { + .module = THIS_MODULE, + .hdrsize = 0, + .name = "TCM-USER", + .version = 2, + .maxattr = TCMU_ATTR_MAX, + .policy = tcmu_attr_policy, + .mcgrps = tcmu_mcgrps, + .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), + .netnsok = true, + .small_ops = tcmu_genl_ops, + .n_small_ops = ARRAY_SIZE(tcmu_genl_ops), + .resv_start_op = TCMU_CMD_SET_FEATURES + 1, +}; + +#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) +#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) +#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) +#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) + +static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + uint32_t i; + + for (i = 0; i < len; i++) + clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); +} + +static inline int tcmu_get_empty_block(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, + int prev_dbi, int length, int *iov_cnt) +{ + XA_STATE(xas, &udev->data_pages, 0); + struct page *page; + int i, cnt, dbi, dpi; + int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE); + + dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); + if (dbi == udev->dbi_thresh) + return -1; + + dpi = dbi * udev->data_pages_per_blk; + /* Count the number of already allocated pages */ + xas_set(&xas, dpi); + rcu_read_lock(); + for (cnt = 0; xas_next(&xas) && cnt < page_cnt;) + cnt++; + rcu_read_unlock(); + + for (i = cnt; i < page_cnt; i++) { + /* try to get new zeroed page from the mm */ + page = alloc_page(GFP_NOIO | __GFP_ZERO); + if (!page) + break; + + if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) { + 
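+			/*
+			 * A non-NULL return here (an old entry or an
+			 * xa_err() value) means the page was not newly
+			 * stored; drop it and stop allocating.
+			 */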
__free_page(page); + break; + } + } + if (atomic_add_return(i - cnt, &global_page_count) > + tcmu_global_max_pages) + schedule_delayed_work(&tcmu_unmap_work, 0); + + if (i && dbi > udev->dbi_max) + udev->dbi_max = dbi; + + set_bit(dbi, udev->data_bitmap); + tcmu_cmd_set_dbi(tcmu_cmd, dbi); + + if (dbi != prev_dbi + 1) + *iov_cnt += 1; + + return i == page_cnt ? dbi : -1; +} + +static int tcmu_get_empty_blocks(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, int length) +{ + /* start value of dbi + 1 must not be a valid dbi */ + int dbi = -2; + int blk_data_len, iov_cnt = 0; + uint32_t blk_size = udev->data_blk_size; + + for (; length > 0; length -= blk_size) { + blk_data_len = min_t(uint32_t, length, blk_size); + dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, + &iov_cnt); + if (dbi < 0) + return -1; + } + return iov_cnt; +} + +static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) +{ + kfree(tcmu_cmd->dbi); + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); +} + +static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd) +{ + int i, len; + struct se_cmd *se_cmd = cmd->se_cmd; + uint32_t blk_size = cmd->tcmu_dev->data_blk_size; + + cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size); + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); + for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++) + len += se_cmd->t_bidi_data_sg[i].length; + cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size); + cmd->dbi_cnt += cmd->dbi_bidi_cnt; + cmd->data_len_bidi = len; + } +} + +static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, + struct iovec **iov, int prev_dbi, int len) +{ + /* Get the next dbi */ + int dbi = tcmu_cmd_get_dbi(cmd); + + /* Do not add more than udev->data_blk_size to iov */ + len = min_t(int, len, udev->data_blk_size); + + /* + * The following code will gather and map the blocks to the same iovec + * when the blocks are all next to each other. 
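+	 *
+	 * For example, dbis 3, 4, 5 followed by 9 yield two iovecs: one
+	 * spanning blocks 3-5 and a fresh one starting at block 9 (this is
+	 * what the prev_dbi + 1 test below checks).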
+ */ + if (dbi != prev_dbi + 1) { + /* dbi is not next to previous dbi, so start new iov */ + if (prev_dbi >= 0) + (*iov)++; + /* write offset relative to mb_addr */ + (*iov)->iov_base = (void __user *) + (udev->data_off + dbi * udev->data_blk_size); + } + (*iov)->iov_len += len; + + return dbi; +} + +static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, + struct iovec **iov, int data_length) +{ + /* start value of dbi + 1 must not be a valid dbi */ + int dbi = -2; + + /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */ + for (; data_length > 0; data_length -= udev->data_blk_size) + dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); +} + +static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + struct tcmu_cmd *tcmu_cmd; + + tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); + if (!tcmu_cmd) + return NULL; + + INIT_LIST_HEAD(&tcmu_cmd->queue_entry); + tcmu_cmd->se_cmd = se_cmd; + tcmu_cmd->tcmu_dev = udev; + + tcmu_cmd_set_block_cnts(tcmu_cmd); + tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), + GFP_NOIO); + if (!tcmu_cmd->dbi) { + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + return NULL; + } + + return tcmu_cmd; +} + +static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) +{ + unsigned long offset = offset_in_page(vaddr); + void *start = vaddr - offset; + + size = round_up(size+offset, PAGE_SIZE); + + while (size) { + flush_dcache_page(vmalloc_to_page(start)); + start += PAGE_SIZE; + size -= PAGE_SIZE; + } +} + +/* + * Some ring helper functions. We don't assume size is a power of 2 so + * we can't use circ_buf.h. + */ +static inline size_t spc_used(size_t head, size_t tail, size_t size) +{ + int diff = head - tail; + + if (diff >= 0) + return diff; + else + return size + diff; +} + +static inline size_t spc_free(size_t head, size_t tail, size_t size) +{ + /* Keep 1 byte unused or we can't tell full from empty */ + return (size - spc_used(head, tail, size) - 1); +} + +static inline size_t head_to_end(size_t head, size_t size) +{ + return size - head; +} + +#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) + +#define TCMU_SG_TO_DATA_AREA 1 +#define TCMU_DATA_AREA_TO_SG 2 + +static inline void tcmu_copy_data(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, uint32_t direction, + struct scatterlist *sg, unsigned int sg_nents, + struct iovec **iov, size_t data_len) +{ + /* start value of dbi + 1 must not be a valid dbi */ + int dbi = -2; + size_t page_remaining, cp_len; + int page_cnt, page_inx, dpi; + struct sg_mapping_iter sg_iter; + unsigned int sg_flags; + struct page *page; + void *data_page_start, *data_addr; + + if (direction == TCMU_SG_TO_DATA_AREA) + sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG; + else + sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; + sg_miter_start(&sg_iter, sg, sg_nents, sg_flags); + + while (data_len) { + if (direction == TCMU_SG_TO_DATA_AREA) + dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, + data_len); + else + dbi = tcmu_cmd_get_dbi(tcmu_cmd); + + page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE); + if (page_cnt > udev->data_pages_per_blk) + page_cnt = udev->data_pages_per_blk; + + dpi = dbi * udev->data_pages_per_blk; + for (page_inx = 0; page_inx < page_cnt && data_len; + page_inx++, dpi++) { + page = xa_load(&udev->data_pages, dpi); + + if (direction == TCMU_DATA_AREA_TO_SG) + flush_dcache_page(page); + data_page_start = kmap_atomic(page); + 
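+			/*
+			 * Copy through the kernel mapping in up-to-page-size
+			 * chunks, advancing the sg miter and the data-area
+			 * page in lockstep.
+			 */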
page_remaining = PAGE_SIZE; + + while (page_remaining && data_len) { + if (!sg_miter_next(&sg_iter)) { + /* set length to 0 to abort outer loop */ + data_len = 0; + pr_debug("%s: aborting data copy due to exhausted sg_list\n", + __func__); + break; + } + cp_len = min3(sg_iter.length, page_remaining, + data_len); + + data_addr = data_page_start + + PAGE_SIZE - page_remaining; + if (direction == TCMU_SG_TO_DATA_AREA) + memcpy(data_addr, sg_iter.addr, cp_len); + else + memcpy(sg_iter.addr, data_addr, cp_len); + + data_len -= cp_len; + page_remaining -= cp_len; + sg_iter.consumed = cp_len; + } + sg_miter_stop(&sg_iter); + + kunmap_atomic(data_page_start); + if (direction == TCMU_SG_TO_DATA_AREA) + flush_dcache_page(page); + } + } +} + +static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, + struct iovec **iov) +{ + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + + tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, + se_cmd->t_data_nents, iov, se_cmd->data_length); +} + +static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, + bool bidi, uint32_t read_len) +{ + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg; + unsigned int data_nents; + + if (!bidi) { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } else { + /* + * For bidi case, the first count blocks are for Data-Out + * buffer blocks, and before gathering the Data-In buffer + * the Data-Out buffer blocks should be skipped. + */ + tcmu_cmd_set_dbi_cur(tcmu_cmd, + tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt); + + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } + + tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, + data_nents, NULL, read_len); +} + +static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) +{ + return thresh - bitmap_weight(bitmap, thresh); +} + +/* + * We can't queue a command until we have space available on the cmd ring. + * + * Called with ring lock held. + */ +static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) +{ + struct tcmu_mailbox *mb = udev->mb_addr; + size_t space, cmd_needed; + u32 cmd_head; + + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + + /* + * If cmd end-of-ring space is too small then we need space for a NOP plus + * original cmd - cmds are internally contiguous. + */ + if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) + cmd_needed = cmd_size; + else + cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); + + space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); + if (space < cmd_needed) { + pr_debug("no cmd space: %u %u %u\n", cmd_head, + udev->cmdr_last_cleaned, udev->cmdr_size); + return false; + } + return true; +} + +/* + * We have to allocate data buffers before we can queue a command. + * Returns -1 on error (not enough space) or number of needed iovs on success + * + * Called with ring lock held. 
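+ *
+ * Worked example: with dbi_thresh == 8, 3 free bits below it and a
+ * command needing 5 blocks, dbi_thresh is raised by 5 (capped at
+ * max_blocks) before the blocks are actually claimed.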
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+				 int *iov_bidi_cnt)
+{
+	int space, iov_cnt = 0, ret = 0;
+
+	if (!cmd->dbi_cnt)
+		goto wr_iov_cnts;
+
+	/* try to check and get the data blocks as needed */
+	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
+	if (space < cmd->dbi_cnt) {
+		unsigned long blocks_left =
+				(udev->max_blocks - udev->dbi_thresh) + space;
+
+		if (blocks_left < cmd->dbi_cnt) {
+			pr_debug("no data space: only %lu available, but asked for %u\n",
+				 blocks_left * udev->data_blk_size,
+				 cmd->dbi_cnt * udev->data_blk_size);
+			return -1;
+		}
+
+		udev->dbi_thresh += cmd->dbi_cnt;
+		if (udev->dbi_thresh > udev->max_blocks)
+			udev->dbi_thresh = udev->max_blocks;
+	}
+
+	iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
+	if (iov_cnt < 0)
+		return -1;
+
+	if (cmd->dbi_bidi_cnt) {
+		ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
+		if (ret < 0)
+			return -1;
+	}
+wr_iov_cnts:
+	*iov_bidi_cnt = ret;
+	return iov_cnt + ret;
+}
+
+static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
+{
+	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
+		   sizeof(struct tcmu_cmd_entry));
+}
+
+static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
+					   size_t base_command_size)
+{
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	size_t command_size;
+
+	command_size = base_command_size +
+		round_up(scsi_command_size(se_cmd->t_task_cdb),
+			 TCMU_OP_ALIGN_SIZE);
+
+	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
+
+	return command_size;
+}
+
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+				 struct timer_list *timer)
+{
+	if (!tmo)
+		return;
+
+	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+	if (!timer_pending(timer))
+		mod_timer(timer, tcmu_cmd->deadline);
+
+	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
+}
+
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
+{
+	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+	unsigned int tmo;
+
+	/*
+	 * For backwards compat, if qfull_time_out is not set, use
+	 * cmd_time_out; if that's not set either, use the default timeout.
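+	 *
+	 * Sketch of the resulting precedence (qfull_time_out is an int,
+	 * with a negative value meaning "unset"):
+	 *
+	 *	qfull_time_out == 0 -> fail now with -ETIMEDOUT
+	 *	qfull_time_out  > 0 -> use qfull_time_out
+	 *	cmd_time_out   != 0 -> use cmd_time_out
+	 *	otherwise           -> TCMU_TIME_OUT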
+ */ + if (!udev->qfull_time_out) + return -ETIMEDOUT; + else if (udev->qfull_time_out > 0) + tmo = udev->qfull_time_out; + else if (udev->cmd_time_out) + tmo = udev->cmd_time_out; + else + tmo = TCMU_TIME_OUT; + + tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); + + list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); + pr_debug("adding cmd %p on dev %s to ring space wait queue\n", + tcmu_cmd, udev->name); + return 0; +} + +static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) +{ + struct tcmu_cmd_entry_hdr *hdr; + struct tcmu_mailbox *mb = udev->mb_addr; + uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + + /* Insert a PAD if end-of-ring space is too small */ + if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { + size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); + + hdr = udev->cmdr + cmd_head; + tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD); + tcmu_hdr_set_len(&hdr->len_op, pad_size); + hdr->cmd_id = 0; /* not used for PAD */ + hdr->kflags = 0; + hdr->uflags = 0; + tcmu_flush_dcache_range(hdr, sizeof(*hdr)); + + UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + WARN_ON(cmd_head != 0); + } + + return cmd_head; +} + +static void tcmu_unplug_device(struct se_dev_plug *se_plug) +{ + struct se_device *se_dev = se_plug->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags); + uio_event_notify(&udev->uio_info); +} + +static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev) +{ + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) + return &udev->se_plug; + + return NULL; +} + +/** + * queue_cmd_ring - queue cmd to ring or internally + * @tcmu_cmd: cmd to queue + * @scsi_err: TCM error code if failure (-1) returned. + * + * Returns: + * -1 we cannot queue internally or to the ring. + * 0 success + * 1 internally queued to wait for ring memory to free. + */ +static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + size_t base_command_size, command_size; + struct tcmu_mailbox *mb = udev->mb_addr; + struct tcmu_cmd_entry *entry; + struct iovec *iov; + int iov_cnt, iov_bidi_cnt; + uint32_t cmd_id, cmd_head; + uint64_t cdb_off; + uint32_t blk_size = udev->data_blk_size; + /* size of data buffer needed */ + size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; + + *scsi_err = TCM_NO_SENSE; + + if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { + *scsi_err = TCM_LUN_BUSY; + return -1; + } + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { + *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -1; + } + + if (!list_empty(&udev->qfull_queue)) + goto queue; + + if (data_length > (size_t)udev->max_blocks * blk_size) { + pr_warn("TCMU: Request of size %zu is too big for %zu data area\n", + data_length, (size_t)udev->max_blocks * blk_size); + *scsi_err = TCM_INVALID_CDB_FIELD; + return -1; + } + + iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); + if (iov_cnt < 0) + goto free_and_queue; + + /* + * Must be a certain minimum size for response sense info, but + * also may be larger if the iov array is large. 
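+	 *
+	 * E.g. tcmu_cmd_get_base_cmd_size(4) is the larger of
+	 * sizeof(struct tcmu_cmd_entry) and the header-plus-4-iovec layout,
+	 * so small iov counts still leave room for the response sense data.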
+ */ + base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt); + command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); + + if (command_size > (udev->cmdr_size / 2)) { + pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n", + command_size, udev->cmdr_size); + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); + *scsi_err = TCM_INVALID_CDB_FIELD; + return -1; + } + + if (!is_ring_space_avail(udev, command_size)) + /* + * Don't leave commands partially setup because the unmap + * thread might need the blocks to make forward progress. + */ + goto free_and_queue; + + if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), + GFP_NOWAIT) < 0) { + pr_err("tcmu: Could not allocate cmd id.\n"); + + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); + *scsi_err = TCM_OUT_OF_RESOURCES; + return -1; + } + tcmu_cmd->cmd_id = cmd_id; + + pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, + tcmu_cmd, udev->name); + + cmd_head = ring_insert_padding(udev, command_size); + + entry = udev->cmdr + cmd_head; + memset(entry, 0, command_size); + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); + + /* prepare iov list and copy data to data area if necessary */ + tcmu_cmd_reset_dbi_cur(tcmu_cmd); + iov = &entry->req.iov[0]; + + if (se_cmd->data_direction == DMA_TO_DEVICE || + se_cmd->se_cmd_flags & SCF_BIDI) + scatter_data_area(udev, tcmu_cmd, &iov); + else + tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + + entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; + + /* Handle BIDI commands */ + if (se_cmd->se_cmd_flags & SCF_BIDI) { + iov++; + tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); + entry->req.iov_bidi_cnt = iov_bidi_cnt; + } + + tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); + + entry->hdr.cmd_id = tcmu_cmd->cmd_id; + + tcmu_hdr_set_len(&entry->hdr.len_op, command_size); + + /* All offsets relative to mb_addr, not start of entry! */ + cdb_off = CMDR_OFF + cmd_head + base_command_size; + memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); + entry->req.cdb_off = cdb_off; + tcmu_flush_dcache_range(entry, command_size); + + UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); + + if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) + uio_event_notify(&udev->uio_info); + + return 0; + +free_and_queue: + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); + tcmu_cmd_reset_dbi_cur(tcmu_cmd); + +queue: + if (add_to_qfull_queue(tcmu_cmd)) { + *scsi_err = TCM_OUT_OF_RESOURCES; + return -1; + } + + return 1; +} + +/** + * queue_tmr_ring - queue tmr info to ring or internally + * @udev: related tcmu_dev + * @tmr: tcmu_tmr containing tmr info to queue + * + * Returns: + * 0 success + * 1 internally queued to wait for ring memory to free. 
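+ *
+ * In both cases ownership of @tmr passes to this function: it is
+ * either freed after being copied to the ring (or when the ring is
+ * broken), or parked on the tmr_queue for a later retry. The caller
+ * must not touch it afterwards.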
+ */ +static int +queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) +{ + struct tcmu_tmr_entry *entry; + int cmd_size; + int id_list_sz; + struct tcmu_mailbox *mb = udev->mb_addr; + uint32_t cmd_head; + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) + goto out_free; + + id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt; + cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE); + + if (!list_empty(&udev->tmr_queue) || + !is_ring_space_avail(udev, cmd_size)) { + list_add_tail(&tmr->queue_entry, &udev->tmr_queue); + pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n", + tmr, udev->name); + return 1; + } + + cmd_head = ring_insert_padding(udev, cmd_size); + + entry = udev->cmdr + cmd_head; + memset(entry, 0, cmd_size); + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR); + tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size); + entry->tmr_type = tmr->tmr_type; + entry->cmd_cnt = tmr->tmr_cmd_cnt; + memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz); + tcmu_flush_dcache_range(entry, cmd_size); + + UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + uio_event_notify(&udev->uio_info); + +out_free: + kfree(tmr); + + return 0; +} + +static sense_reason_t +tcmu_queue_cmd(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + struct tcmu_cmd *tcmu_cmd; + sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD; + int ret = -1; + + tcmu_cmd = tcmu_alloc_cmd(se_cmd); + if (!tcmu_cmd) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + mutex_lock(&udev->cmdr_lock); + if (!(se_cmd->transport_state & CMD_T_ABORTED)) + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); + if (ret < 0) + tcmu_free_cmd(tcmu_cmd); + else + se_cmd->priv = tcmu_cmd; + mutex_unlock(&udev->cmdr_lock); + return scsi_ret; +} + +static void tcmu_set_next_deadline(struct list_head *queue, + struct timer_list *timer) +{ + struct tcmu_cmd *cmd; + + if (!list_empty(queue)) { + cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry); + mod_timer(timer, cmd->deadline); + } else + del_timer(timer); +} + +static int +tcmu_tmr_type(enum tcm_tmreq_table tmf) +{ + switch (tmf) { + case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK; + case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET; + case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA; + case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET; + case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET; + case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET; + case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET; + case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO; + default: return TCMU_TMR_UNKNOWN; + } +} + +static void +tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, + struct list_head *cmd_list) +{ + int i = 0, cmd_cnt = 0; + bool unqueued = false; + struct tcmu_cmd *cmd; + struct se_cmd *se_cmd; + struct tcmu_tmr *tmr; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + mutex_lock(&udev->cmdr_lock); + + /* First we check for aborted commands in qfull_queue */ + list_for_each_entry(se_cmd, cmd_list, state_list) { + i++; + if (!se_cmd->priv) + continue; + cmd = se_cmd->priv; + /* Commands on qfull queue have no id yet */ + if (cmd->cmd_id) { + cmd_cnt++; + continue; + } + pr_debug("Removing aborted command %p from queue on dev %s.\n", + cmd, udev->name); + + list_del_init(&cmd->queue_entry); + tcmu_free_cmd(cmd); + se_cmd->priv = NULL; + target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED); + unqueued = true; 
+ } + if (unqueued) + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + + if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) + goto unlock; + + pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", + tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); + + tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO); + if (!tmr) + goto unlock; + + tmr->tmr_type = tcmu_tmr_type(tmf); + tmr->tmr_cmd_cnt = cmd_cnt; + + if (cmd_cnt != 0) { + cmd_cnt = 0; + list_for_each_entry(se_cmd, cmd_list, state_list) { + if (!se_cmd->priv) + continue; + cmd = se_cmd->priv; + if (cmd->cmd_id) + tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id; + } + } + + queue_tmr_ring(udev, tmr); + +unlock: + mutex_unlock(&udev->cmdr_lock); +} + +static bool tcmu_handle_completion(struct tcmu_cmd *cmd, + struct tcmu_cmd_entry *entry, bool keep_buf) +{ + struct se_cmd *se_cmd = cmd->se_cmd; + struct tcmu_dev *udev = cmd->tcmu_dev; + bool read_len_valid = false; + bool ret = true; + uint32_t read_len; + + /* + * cmd has been completed already from timeout, just reclaim + * data area space and free cmd + */ + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON_ONCE(se_cmd); + goto out; + } + if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { + pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n", + entry->hdr.cmd_id); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + ret = false; + goto out; + } + + list_del_init(&cmd->queue_entry); + + tcmu_cmd_reset_dbi_cur(cmd); + + if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { + pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", + cmd->se_cmd); + entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; + goto done; + } + + read_len = se_cmd->data_length; + if (se_cmd->data_direction == DMA_FROM_DEVICE && + (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { + read_len_valid = true; + if (entry->rsp.read_len < read_len) + read_len = entry->rsp.read_len; + } + + if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { + transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); + if (!read_len_valid ) + goto done; + else + se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; + } + if (se_cmd->se_cmd_flags & SCF_BIDI) { + /* Get Data-In buffer before clean up */ + gather_data_area(udev, cmd, true, read_len); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + gather_data_area(udev, cmd, false, read_len); + } else if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* TODO: */ + } else if (se_cmd->data_direction != DMA_NONE) { + pr_warn("TCMU: data direction was %d!\n", + se_cmd->data_direction); + } + +done: + se_cmd->priv = NULL; + if (read_len_valid) { + pr_debug("read_len = %d\n", read_len); + target_complete_cmd_with_length(cmd->se_cmd, + entry->rsp.scsi_status, read_len); + } else + target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); + +out: + if (!keep_buf) { + tcmu_cmd_free_data(cmd, cmd->dbi_cnt); + tcmu_free_cmd(cmd); + } else { + /* + * Keep this command after completion, since userspace still + * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF + * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept + * a second completion later. + * Userspace can free the buffer later by writing the cmd_id + * to new action attribute free_kept_buf. 
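+ * (See tcmu_free_kept_buf_store().) tcmu_release() also reaps any
+ * still-kept buffers when userspace closes the uio device.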
+ */ + clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); + set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags); + } + return ret; +} + +static int tcmu_run_tmr_queue(struct tcmu_dev *udev) +{ + struct tcmu_tmr *tmr, *tmp; + LIST_HEAD(tmrs); + + if (list_empty(&udev->tmr_queue)) + return 1; + + pr_debug("running %s's tmr queue\n", udev->name); + + list_splice_init(&udev->tmr_queue, &tmrs); + + list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) { + list_del_init(&tmr->queue_entry); + + pr_debug("removing tmr %p on dev %s from queue\n", + tmr, udev->name); + + if (queue_tmr_ring(udev, tmr)) { + pr_debug("ran out of space during tmr queue run\n"); + /* + * tmr was requeued, so just put all tmrs back in + * the queue + */ + list_splice_tail(&tmrs, &udev->tmr_queue); + return 0; + } + } + + return 1; +} + +static bool tcmu_handle_completions(struct tcmu_dev *udev) +{ + struct tcmu_mailbox *mb; + struct tcmu_cmd *cmd; + bool free_space = false; + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { + pr_err("ring broken, not handling completions\n"); + return false; + } + + mb = udev->mb_addr; + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { + + struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; + bool keep_buf; + + /* + * Flush max. up to end of cmd ring since current entry might + * be a padding that is shorter than sizeof(*entry) + */ + size_t ring_left = head_to_end(udev->cmdr_last_cleaned, + udev->cmdr_size); + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? + ring_left : sizeof(*entry)); + + free_space = true; + + if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD || + tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) { + UPDATE_HEAD(udev->cmdr_last_cleaned, + tcmu_hdr_get_len(entry->hdr.len_op), + udev->cmdr_size); + continue; + } + WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); + + keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF); + if (keep_buf) + cmd = xa_load(&udev->commands, entry->hdr.cmd_id); + else + cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); + if (!cmd) { + pr_err("cmd_id %u not found, ring is broken\n", + entry->hdr.cmd_id); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + return false; + } + + if (!tcmu_handle_completion(cmd, entry, keep_buf)) + break; + + UPDATE_HEAD(udev->cmdr_last_cleaned, + tcmu_hdr_get_len(entry->hdr.len_op), + udev->cmdr_size); + } + if (free_space) + free_space = tcmu_run_tmr_queue(udev); + + if (atomic_read(&global_page_count) > tcmu_global_max_pages && + xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { + /* + * Allocated blocks exceeded global block limit, currently no + * more pending or waiting commands so try to reclaim blocks. 
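+ * The delayed work runs tcmu_unmap_work_fn(), whose
+ * find_free_blocks() call does the actual reclaiming.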
+ */ + schedule_delayed_work(&tcmu_unmap_work, 0); + } + if (udev->cmd_time_out) + tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); + + return free_space; +} + +static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) +{ + struct se_cmd *se_cmd; + + if (!time_after_eq(jiffies, cmd->deadline)) + return; + + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); + list_del_init(&cmd->queue_entry); + se_cmd = cmd->se_cmd; + se_cmd->priv = NULL; + cmd->se_cmd = NULL; + + pr_debug("Timing out inflight cmd %u on dev %s.\n", + cmd->cmd_id, cmd->tcmu_dev->name); + + target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); +} + +static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) +{ + struct se_cmd *se_cmd; + + if (!time_after_eq(jiffies, cmd->deadline)) + return; + + pr_debug("Timing out queued cmd %p on dev %s.\n", + cmd, cmd->tcmu_dev->name); + + list_del_init(&cmd->queue_entry); + se_cmd = cmd->se_cmd; + tcmu_free_cmd(cmd); + + se_cmd->priv = NULL; + target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); +} + +static void tcmu_device_timedout(struct tcmu_dev *udev) +{ + spin_lock(&timed_out_udevs_lock); + if (list_empty(&udev->timedout_entry)) + list_add_tail(&udev->timedout_entry, &timed_out_udevs); + spin_unlock(&timed_out_udevs_lock); + + schedule_delayed_work(&tcmu_unmap_work, 0); +} + +static void tcmu_cmd_timedout(struct timer_list *t) +{ + struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); + + pr_debug("%s cmd timeout has expired\n", udev->name); + tcmu_device_timedout(udev); +} + +static void tcmu_qfull_timedout(struct timer_list *t) +{ + struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); + + pr_debug("%s qfull timeout has expired\n", udev->name); + tcmu_device_timedout(udev); +} + +static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) +{ + struct tcmu_hba *tcmu_hba; + + tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); + if (!tcmu_hba) + return -ENOMEM; + + tcmu_hba->host_id = host_id; + hba->hba_ptr = tcmu_hba; + + return 0; +} + +static void tcmu_detach_hba(struct se_hba *hba) +{ + kfree(hba->hba_ptr); + hba->hba_ptr = NULL; +} + +static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) +{ + struct tcmu_dev *udev; + + udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); + if (!udev) + return NULL; + kref_init(&udev->kref); + + udev->name = kstrdup(name, GFP_KERNEL); + if (!udev->name) { + kfree(udev); + return NULL; + } + + udev->hba = hba; + udev->cmd_time_out = TCMU_TIME_OUT; + udev->qfull_time_out = -1; + + udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; + udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; + udev->cmdr_size = CMDR_SIZE_DEF; + udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); + + mutex_init(&udev->cmdr_lock); + + INIT_LIST_HEAD(&udev->node); + INIT_LIST_HEAD(&udev->timedout_entry); + INIT_LIST_HEAD(&udev->qfull_queue); + INIT_LIST_HEAD(&udev->tmr_queue); + INIT_LIST_HEAD(&udev->inflight_queue); + xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); + + timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); + timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); + + xa_init(&udev->data_pages); + + return &udev->se_dev; +} + +static void tcmu_dev_call_rcu(struct rcu_head *p) +{ + struct se_device *dev = container_of(p, struct se_device, rcu_head); + struct tcmu_dev *udev = TCMU_DEV(dev); + + kfree(udev->uio_info.name); + kfree(udev->name); + kfree(udev); +} + +static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) +{ + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) 
|| + test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { + kmem_cache_free(tcmu_cmd_cache, cmd); + return 0; + } + return -EINVAL; +} + +static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first, + unsigned long last) +{ + struct page *page; + unsigned long dpi; + u32 pages_freed = 0; + + first = first * udev->data_pages_per_blk; + last = (last + 1) * udev->data_pages_per_blk - 1; + xa_for_each_range(&udev->data_pages, dpi, page, first, last) { + xa_erase(&udev->data_pages, dpi); + /* + * While reaching here there may be page faults occurring on + * the to-be-released pages. A race condition may occur if + * unmap_mapping_range() is called before page faults on these + * pages have completed; a valid but stale map is created. + * + * If another command subsequently runs and needs to extend + * dbi_thresh, it may reuse the slot corresponding to the + * previous page in data_bitmap. Though we will allocate a new + * page for the slot in data_area, no page fault will happen + * because we have a valid map. Therefore the command's data + * will be lost. + * + * We lock and unlock pages that are to be released to ensure + * all page faults have completed. This way + * unmap_mapping_range() can ensure stale maps are cleanly + * removed. + */ + lock_page(page); + unlock_page(page); + __free_page(page); + pages_freed++; + } + + atomic_sub(pages_freed, &global_page_count); + + return pages_freed; +} + +static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) +{ + struct tcmu_tmr *tmr, *tmp; + + list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { + list_del_init(&tmr->queue_entry); + kfree(tmr); + } +} + +static void tcmu_dev_kref_release(struct kref *kref) +{ + struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); + struct se_device *dev = &udev->se_dev; + struct tcmu_cmd *cmd; + bool all_expired = true; + unsigned long i; + + vfree(udev->mb_addr); + udev->mb_addr = NULL; + + spin_lock_bh(&timed_out_udevs_lock); + if (!list_empty(&udev->timedout_entry)) + list_del(&udev->timedout_entry); + spin_unlock_bh(&timed_out_udevs_lock); + + /* Upper layer should drain all requests before calling this */ + mutex_lock(&udev->cmdr_lock); + xa_for_each(&udev->commands, i, cmd) { + if (tcmu_check_and_free_pending_cmd(cmd) != 0) + all_expired = false; + } + /* There can be left over TMR cmds. Remove them. */ + tcmu_remove_all_queued_tmr(udev); + if (!list_empty(&udev->qfull_queue)) + all_expired = false; + xa_destroy(&udev->commands); + WARN_ON(!all_expired); + + tcmu_blocks_release(udev, 0, udev->dbi_max); + bitmap_free(udev->data_bitmap); + mutex_unlock(&udev->cmdr_lock); + + pr_debug("dev_kref_release\n"); + + call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); +} + +static void run_qfull_queue(struct tcmu_dev *udev, bool fail) +{ + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; + LIST_HEAD(cmds); + sense_reason_t scsi_ret; + int ret; + + if (list_empty(&udev->qfull_queue)) + return; + + pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); + + list_splice_init(&udev->qfull_queue, &cmds); + + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { + list_del_init(&tcmu_cmd->queue_entry); + + pr_debug("removing cmd %p on dev %s from queue\n", + tcmu_cmd, udev->name); + + if (fail) { + /* + * We were not able to even start the command, so + * fail with busy to allow a retry in case runner + * was only temporarily down. If the device is being + * removed then LIO core will do the right thing and + * fail the retry. 
+ */ + tcmu_cmd->se_cmd->priv = NULL; + target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); + tcmu_free_cmd(tcmu_cmd); + continue; + } + + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); + if (ret < 0) { + pr_debug("cmd %p on dev %s failed with %u\n", + tcmu_cmd, udev->name, scsi_ret); + /* + * Ignore scsi_ret for now. target_complete_cmd + * drops it. + */ + tcmu_cmd->se_cmd->priv = NULL; + target_complete_cmd(tcmu_cmd->se_cmd, + SAM_STAT_CHECK_CONDITION); + tcmu_free_cmd(tcmu_cmd); + } else if (ret > 0) { + pr_debug("ran out of space during cmdr queue run\n"); + /* + * cmd was requeued, so just put all cmds back in + * the queue + */ + list_splice_tail(&cmds, &udev->qfull_queue); + break; + } + } + + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); +} + +static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + + mutex_lock(&udev->cmdr_lock); + if (tcmu_handle_completions(udev)) + run_qfull_queue(udev, false); + mutex_unlock(&udev->cmdr_lock); + + return 0; +} + +/* + * mmap code from uio.c. Copied here because we want to hook mmap() + * and this stuff must come along. + */ +static int tcmu_find_mem_index(struct vm_area_struct *vma) +{ + struct tcmu_dev *udev = vma->vm_private_data; + struct uio_info *info = &udev->uio_info; + + if (vma->vm_pgoff < MAX_UIO_MAPS) { + if (info->mem[vma->vm_pgoff].size == 0) + return -1; + return (int)vma->vm_pgoff; + } + return -1; +} + +static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) +{ + struct page *page; + + mutex_lock(&udev->cmdr_lock); + page = xa_load(&udev->data_pages, dpi); + if (likely(page)) { + get_page(page); + lock_page(page); + mutex_unlock(&udev->cmdr_lock); + return page; + } + + /* + * Userspace messed up and passed in a address not in the + * data iov passed to it. + */ + pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n", + dpi, udev->name); + mutex_unlock(&udev->cmdr_lock); + + return NULL; +} + +static void tcmu_vma_open(struct vm_area_struct *vma) +{ + struct tcmu_dev *udev = vma->vm_private_data; + + pr_debug("vma_open\n"); + + kref_get(&udev->kref); +} + +static void tcmu_vma_close(struct vm_area_struct *vma) +{ + struct tcmu_dev *udev = vma->vm_private_data; + + pr_debug("vma_close\n"); + + /* release ref from tcmu_vma_open */ + kref_put(&udev->kref, tcmu_dev_kref_release); +} + +static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) +{ + struct tcmu_dev *udev = vmf->vma->vm_private_data; + struct uio_info *info = &udev->uio_info; + struct page *page; + unsigned long offset; + void *addr; + vm_fault_t ret = 0; + + int mi = tcmu_find_mem_index(vmf->vma); + if (mi < 0) + return VM_FAULT_SIGBUS; + + /* + * We need to subtract mi because userspace uses offset = N*PAGE_SIZE + * to use mem[N]. 
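+ * Within mem[mi], offsets below data_off fall into the vmalloc()ed
+ * mailbox and command ring; anything beyond that is a dynamically
+ * allocated data page, looked up by its data page index (dpi).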
+ */ + offset = (vmf->pgoff - mi) << PAGE_SHIFT; + + if (offset < udev->data_off) { + /* For the vmalloc()ed cmd area pages */ + addr = (void *)(unsigned long)info->mem[mi].addr + offset; + page = vmalloc_to_page(addr); + get_page(page); + } else { + uint32_t dpi; + + /* For the dynamically growing data area pages */ + dpi = (offset - udev->data_off) / PAGE_SIZE; + page = tcmu_try_get_data_page(udev, dpi); + if (!page) + return VM_FAULT_SIGBUS; + ret = VM_FAULT_LOCKED; + } + + vmf->page = page; + return ret; +} + +static const struct vm_operations_struct tcmu_vm_ops = { + .open = tcmu_vma_open, + .close = tcmu_vma_close, + .fault = tcmu_vma_fault, +}; + +static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &tcmu_vm_ops; + + vma->vm_private_data = udev; + + /* Ensure the mmap is exactly the right size */ + if (vma_pages(vma) != udev->mmap_pages) + return -EINVAL; + + tcmu_vma_open(vma); + + return 0; +} + +static int tcmu_open(struct uio_info *info, struct inode *inode) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + + /* O_EXCL not supported for char devs, so fake it? */ + if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) + return -EBUSY; + + udev->inode = inode; + + pr_debug("open\n"); + + return 0; +} + +static int tcmu_release(struct uio_info *info, struct inode *inode) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + struct tcmu_cmd *cmd; + unsigned long i; + bool freed = false; + + mutex_lock(&udev->cmdr_lock); + + xa_for_each(&udev->commands, i, cmd) { + /* Cmds with KEEP_BUF set are no longer on the ring, but + * userspace still holds the data buffer. If userspace closes + * we implicitly free these cmds and buffers, since after new + * open the (new ?) userspace cannot find the cmd in the ring + * and thus never will release the buffer by writing cmd_id to + * free_kept_buf action attribute. + */ + if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) + continue; + pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n", + cmd->cmd_id, udev->name); + freed = true; + + xa_erase(&udev->commands, i); + tcmu_cmd_free_data(cmd, cmd->dbi_cnt); + tcmu_free_cmd(cmd); + } + /* + * We only freed data space, not ring space. Therefore we dont call + * run_tmr_queue, but call run_qfull_queue if tmr_list is empty. + */ + if (freed && list_empty(&udev->tmr_queue)) + run_qfull_queue(udev, false); + + mutex_unlock(&udev->cmdr_lock); + + clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); + + pr_debug("close\n"); + + return 0; +} + +static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) +{ + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; + + if (!tcmu_kern_cmd_reply_supported) + return 0; + + if (udev->nl_reply_supported <= 0) + return 0; + + mutex_lock(&tcmu_nl_cmd_mutex); + + if (tcmu_netlink_blocked) { + mutex_unlock(&tcmu_nl_cmd_mutex); + pr_warn("Failing nl cmd %d on %s. 
Interface is blocked.\n", cmd, + udev->name); + return -EAGAIN; + } + + if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { + mutex_unlock(&tcmu_nl_cmd_mutex); + pr_warn("netlink cmd %d already executing on %s\n", + nl_cmd->cmd, udev->name); + return -EBUSY; + } + + memset(nl_cmd, 0, sizeof(*nl_cmd)); + nl_cmd->cmd = cmd; + nl_cmd->udev = udev; + init_completion(&nl_cmd->complete); + INIT_LIST_HEAD(&nl_cmd->nl_list); + + list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); + + mutex_unlock(&tcmu_nl_cmd_mutex); + return 0; +} + +static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) +{ + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; + + if (!tcmu_kern_cmd_reply_supported) + return; + + if (udev->nl_reply_supported <= 0) + return; + + mutex_lock(&tcmu_nl_cmd_mutex); + + list_del(&nl_cmd->nl_list); + memset(nl_cmd, 0, sizeof(*nl_cmd)); + + mutex_unlock(&tcmu_nl_cmd_mutex); +} + +static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) +{ + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; + int ret; + + if (!tcmu_kern_cmd_reply_supported) + return 0; + + if (udev->nl_reply_supported <= 0) + return 0; + + pr_debug("sleeping for nl reply\n"); + wait_for_completion(&nl_cmd->complete); + + mutex_lock(&tcmu_nl_cmd_mutex); + nl_cmd->cmd = TCMU_CMD_UNSPEC; + ret = nl_cmd->status; + mutex_unlock(&tcmu_nl_cmd_mutex); + + return ret; +} + +static int tcmu_netlink_event_init(struct tcmu_dev *udev, + enum tcmu_genl_cmd cmd, + struct sk_buff **buf, void **hdr) +{ + struct sk_buff *skb; + void *msg_header; + int ret = -ENOMEM; + + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return ret; + + msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); + if (!msg_header) + goto free_skb; + + ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); + if (ret < 0) + goto free_skb; + + ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); + if (ret < 0) + goto free_skb; + + ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); + if (ret < 0) + goto free_skb; + + *buf = skb; + *hdr = msg_header; + return ret; + +free_skb: + nlmsg_free(skb); + return ret; +} + +static int tcmu_netlink_event_send(struct tcmu_dev *udev, + enum tcmu_genl_cmd cmd, + struct sk_buff *skb, void *msg_header) +{ + int ret; + + genlmsg_end(skb, msg_header); + + ret = tcmu_init_genl_cmd_reply(udev, cmd); + if (ret) { + nlmsg_free(skb); + return ret; + } + + ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, + TCMU_MCGRP_CONFIG, GFP_KERNEL); + + /* Wait during an add as the listener may not be up yet */ + if (ret == 0 || + (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE)) + return tcmu_wait_genl_cmd_reply(udev); + else + tcmu_destroy_genl_cmd_reply(udev); + + return ret; +} + +static int tcmu_send_dev_add_event(struct tcmu_dev *udev) +{ + struct sk_buff *skb = NULL; + void *msg_header = NULL; + int ret = 0; + + ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, + &msg_header); + if (ret < 0) + return ret; + return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, + msg_header); +} + +static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) +{ + struct sk_buff *skb = NULL; + void *msg_header = NULL; + int ret = 0; + + ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, + &skb, &msg_header); + if (ret < 0) + return ret; + return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, + skb, msg_header); +} + +static int tcmu_update_uio_info(struct tcmu_dev *udev) +{ + struct tcmu_hba *hba = udev->hba->hba_ptr; + struct uio_info *info; + char *str; 
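+
+ /*
+ * The name built below has the form
+ * "tcm-user/<host_id>/<name>[/<dev_config>]". Userspace (e.g. a
+ * tcmu handler daemon) can use it to match the uio device with
+ * this tcmu device.
+ */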
+
+ info = &udev->uio_info;
+
+ if (udev->dev_config[0])
+ str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
+ udev->name, udev->dev_config);
+ else
+ str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
+ udev->name);
+ if (!str)
+ return -ENOMEM;
+
+ /* If the old string exists, free it */
+ kfree(info->name);
+ info->name = str;
+
+ return 0;
+}
+
+static int tcmu_configure_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+ struct uio_info *info;
+ struct tcmu_mailbox *mb;
+ size_t data_size;
+ int ret = 0;
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+
+ info = &udev->uio_info;
+
+ mutex_lock(&udev->cmdr_lock);
+ udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
+ if (!udev->data_bitmap) {
+ ret = -ENOMEM;
+ goto err_bitmap_alloc;
+ }
+
+ mb = vzalloc(udev->cmdr_size + CMDR_OFF);
+ if (!mb) {
+ ret = -ENOMEM;
+ goto err_vzalloc;
+ }
+
+ /* mailbox fits in first part of CMDR space */
+ udev->mb_addr = mb;
+ udev->cmdr = (void *)mb + CMDR_OFF;
+ udev->data_off = udev->cmdr_size + CMDR_OFF;
+ data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+ udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
+ udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
+ udev->dbi_thresh = 0; /* Default in Idle state */
+
+ /* Initialise the mailbox of the ring buffer */
+ mb->version = TCMU_MAILBOX_VERSION;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
+ TCMU_MAILBOX_FLAG_CAP_READ_LEN |
+ TCMU_MAILBOX_FLAG_CAP_TMR |
+ TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
+ mb->cmdr_off = CMDR_OFF;
+ mb->cmdr_size = udev->cmdr_size;
+
+ WARN_ON(!PAGE_ALIGNED(udev->data_off));
+ WARN_ON(data_size % PAGE_SIZE);
+
+ info->version = __stringify(TCMU_MAILBOX_VERSION);
+
+ info->mem[0].name = "tcm-user command & data buffer";
+ info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
+ info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
+ info->mem[0].memtype = UIO_MEM_NONE;
+
+ info->irqcontrol = tcmu_irqcontrol;
+ info->irq = UIO_IRQ_CUSTOM;
+
+ info->mmap = tcmu_mmap;
+ info->open = tcmu_open;
+ info->release = tcmu_release;
+
+ ret = uio_register_device(tcmu_root_device, info);
+ if (ret)
+ goto err_register;
+
+ /* User can set hw_block_size before enabling the device */
+ if (dev->dev_attrib.hw_block_size == 0)
+ dev->dev_attrib.hw_block_size = 512;
+ /* Other attributes can be configured in userspace */
+ if (!dev->dev_attrib.hw_max_sectors)
+ dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.emulate_write_cache)
+ dev->dev_attrib.emulate_write_cache = 0;
+ dev->dev_attrib.hw_queue_depth = 128;
+
+ /* If user didn't explicitly disable netlink reply support, use
+ * module scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
+ /*
+ * Get a ref in case userspace does a close on the uio device before
+ * LIO has initiated tcmu_free_device.
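+ * The matching kref_put() is done in tcmu_destroy_device().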
+ */ + kref_get(&udev->kref); + + ret = tcmu_send_dev_add_event(udev); + if (ret) + goto err_netlink; + + mutex_lock(&root_udev_mutex); + list_add(&udev->node, &root_udev); + mutex_unlock(&root_udev_mutex); + + return 0; + +err_netlink: + kref_put(&udev->kref, tcmu_dev_kref_release); + uio_unregister_device(&udev->uio_info); +err_register: + vfree(udev->mb_addr); + udev->mb_addr = NULL; +err_vzalloc: + bitmap_free(udev->data_bitmap); + udev->data_bitmap = NULL; +err_bitmap_alloc: + kfree(info->name); + info->name = NULL; + + return ret; +} + +static void tcmu_free_device(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + + /* release ref from init */ + kref_put(&udev->kref, tcmu_dev_kref_release); +} + +static void tcmu_destroy_device(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + + del_timer_sync(&udev->cmd_timer); + del_timer_sync(&udev->qfull_timer); + + mutex_lock(&root_udev_mutex); + list_del(&udev->node); + mutex_unlock(&root_udev_mutex); + + tcmu_send_dev_remove_event(udev); + + uio_unregister_device(&udev->uio_info); + + /* release ref from configure */ + kref_put(&udev->kref, tcmu_dev_kref_release); +} + +static void tcmu_unblock_dev(struct tcmu_dev *udev) +{ + mutex_lock(&udev->cmdr_lock); + clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); + mutex_unlock(&udev->cmdr_lock); +} + +static void tcmu_block_dev(struct tcmu_dev *udev) +{ + mutex_lock(&udev->cmdr_lock); + + if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) + goto unlock; + + /* complete IO that has executed successfully */ + tcmu_handle_completions(udev); + /* fail IO waiting to be queued */ + run_qfull_queue(udev, true); + +unlock: + mutex_unlock(&udev->cmdr_lock); +} + +static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) +{ + struct tcmu_mailbox *mb; + struct tcmu_cmd *cmd; + unsigned long i; + + mutex_lock(&udev->cmdr_lock); + + xa_for_each(&udev->commands, i, cmd) { + pr_debug("removing cmd %u on dev %s from ring %s\n", + cmd->cmd_id, udev->name, + test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ? + "(is expired)" : + (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ? + "(is keep buffer)" : "")); + + xa_erase(&udev->commands, i); + if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) && + !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { + WARN_ON(!cmd->se_cmd); + list_del_init(&cmd->queue_entry); + cmd->se_cmd->priv = NULL; + if (err_level == 1) { + /* + * Userspace was not able to start the + * command or it is retryable. + */ + target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); + } else { + /* hard failure */ + target_complete_cmd(cmd->se_cmd, + SAM_STAT_CHECK_CONDITION); + } + } + tcmu_cmd_free_data(cmd, cmd->dbi_cnt); + tcmu_free_cmd(cmd); + } + + mb = udev->mb_addr; + tcmu_flush_dcache_range(mb, sizeof(*mb)); + pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, + mb->cmd_tail, mb->cmd_head); + + udev->cmdr_last_cleaned = 0; + mb->cmd_tail = 0; + mb->cmd_head = 0; + tcmu_flush_dcache_range(mb, sizeof(*mb)); + clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + + del_timer(&udev->cmd_timer); + + /* + * ring is empty and qfull queue never contains aborted commands. + * So TMRs in tmr queue do not contain relevant cmd_ids. + * After a ring reset userspace should do a fresh start, so + * even LUN RESET message is no longer relevant. 
+ * Therefore remove all TMRs from qfull queue + */ + tcmu_remove_all_queued_tmr(udev); + + run_qfull_queue(udev, false); + + mutex_unlock(&udev->cmdr_lock); +} + +enum { + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, + Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk, + Opt_cmd_ring_size_mb, Opt_err, +}; + +static match_table_t tokens = { + {Opt_dev_config, "dev_config=%s"}, + {Opt_dev_size, "dev_size=%s"}, + {Opt_hw_block_size, "hw_block_size=%d"}, + {Opt_hw_max_sectors, "hw_max_sectors=%d"}, + {Opt_nl_reply_supported, "nl_reply_supported=%d"}, + {Opt_max_data_area_mb, "max_data_area_mb=%d"}, + {Opt_data_pages_per_blk, "data_pages_per_blk=%d"}, + {Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"}, + {Opt_err, NULL} +}; + +static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) +{ + int val, ret; + + ret = match_int(arg, &val); + if (ret < 0) { + pr_err("match_int() failed for dev attrib. Error %d.\n", + ret); + return ret; + } + + if (val <= 0) { + pr_err("Invalid dev attrib value %d. Must be greater than zero.\n", + val); + return -EINVAL; + } + *dev_attrib = val; + return 0; +} + +static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) +{ + int val, ret; + uint32_t pages_per_blk = udev->data_pages_per_blk; + + ret = match_int(arg, &val); + if (ret < 0) { + pr_err("match_int() failed for max_data_area_mb=. Error %d.\n", + ret); + return ret; + } + if (val <= 0) { + pr_err("Invalid max_data_area %d.\n", val); + return -EINVAL; + } + if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) { + pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", + val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages)); + val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages); + } + if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) { + pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n", + val, TCMU_MBS_TO_PAGES(val), pages_per_blk); + return -EINVAL; + } + + mutex_lock(&udev->cmdr_lock); + if (udev->data_bitmap) { + pr_err("Cannot set max_data_area_mb after it has been enabled.\n"); + ret = -EINVAL; + goto unlock; + } + + udev->data_area_mb = val; + udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk; + +unlock: + mutex_unlock(&udev->cmdr_lock); + return ret; +} + +static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg) +{ + int val, ret; + + ret = match_int(arg, &val); + if (ret < 0) { + pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n", + ret); + return ret; + } + + if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) { + pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n", + val, udev->data_area_mb, + TCMU_MBS_TO_PAGES(udev->data_area_mb)); + return -EINVAL; + } + + mutex_lock(&udev->cmdr_lock); + if (udev->data_bitmap) { + pr_err("Cannot set data_pages_per_blk after it has been enabled.\n"); + ret = -EINVAL; + goto unlock; + } + + udev->data_pages_per_blk = val; + udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val; + +unlock: + mutex_unlock(&udev->cmdr_lock); + return ret; +} + +static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg) +{ + int val, ret; + + ret = match_int(arg, &val); + if (ret < 0) { + pr_err("match_int() failed for cmd_ring_size_mb=. 
Error %d.\n", + ret); + return ret; + } + + if (val <= 0) { + pr_err("Invalid cmd_ring_size_mb %d.\n", val); + return -EINVAL; + } + + mutex_lock(&udev->cmdr_lock); + if (udev->data_bitmap) { + pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n"); + ret = -EINVAL; + goto unlock; + } + + udev->cmdr_size = (val << 20) - CMDR_OFF; + if (val > (MB_CMDR_SIZE_DEF >> 20)) { + pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n", + val, (MB_CMDR_SIZE_DEF >> 20)); + udev->cmdr_size = CMDR_SIZE_DEF; + } + +unlock: + mutex_unlock(&udev->cmdr_lock); + return ret; +} + +static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, + const char *page, ssize_t count) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + char *orig, *ptr, *opts; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, token; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_dev_config: + if (match_strlcpy(udev->dev_config, &args[0], + TCMU_CONFIG_LEN) == 0) { + ret = -EINVAL; + break; + } + pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); + break; + case Opt_dev_size: + ret = match_u64(&args[0], &udev->dev_size); + if (ret < 0) + pr_err("match_u64() failed for dev_size=. Error %d.\n", + ret); + break; + case Opt_hw_block_size: + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_block_size)); + break; + case Opt_hw_max_sectors: + ret = tcmu_set_dev_attrib(&args[0], + &(dev->dev_attrib.hw_max_sectors)); + break; + case Opt_nl_reply_supported: + ret = match_int(&args[0], &udev->nl_reply_supported); + if (ret < 0) + pr_err("match_int() failed for nl_reply_supported=. Error %d.\n", + ret); + break; + case Opt_max_data_area_mb: + ret = tcmu_set_max_blocks_param(udev, &args[0]); + break; + case Opt_data_pages_per_blk: + ret = tcmu_set_data_pages_per_blk(udev, &args[0]); + break; + case Opt_cmd_ring_size_mb: + ret = tcmu_set_cmd_ring_size(udev, &args[0]); + break; + default: + break; + } + + if (ret) + break; + } + + kfree(orig); + return (!ret) ? count : ret; +} + +static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + ssize_t bl = 0; + + bl = sprintf(b + bl, "Config: %s ", + udev->dev_config[0] ? 
udev->dev_config : "NULL"); + bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); + bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb); + bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk); + bl += sprintf(b + bl, "CmdRingSizeMB: %u\n", + (udev->cmdr_size + CMDR_OFF) >> 20); + + return bl; +} + +static sector_t tcmu_get_blocks(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + + return div_u64(udev->dev_size - dev->dev_attrib.block_size, + dev->dev_attrib.block_size); +} + +static sense_reason_t +tcmu_parse_cdb(struct se_cmd *cmd) +{ + return passthrough_parse_cdb(cmd, tcmu_queue_cmd); +} + +static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); +} + +static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = container_of(da->da_dev, + struct tcmu_dev, se_dev); + u32 val; + int ret; + + if (da->da_dev->export_count) { + pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); + return -EINVAL; + } + + ret = kstrtou32(page, 0, &val); + if (ret < 0) + return ret; + + udev->cmd_time_out = val * MSEC_PER_SEC; + return count; +} +CONFIGFS_ATTR(tcmu_, cmd_time_out); + +static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? 
+ udev->qfull_time_out : + udev->qfull_time_out / MSEC_PER_SEC); +} + +static ssize_t tcmu_qfull_time_out_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + s32 val; + int ret; + + ret = kstrtos32(page, 0, &val); + if (ret < 0) + return ret; + + if (val >= 0) { + udev->qfull_time_out = val * MSEC_PER_SEC; + } else if (val == -1) { + udev->qfull_time_out = val; + } else { + printk(KERN_ERR "Invalid qfull timeout value %d\n", val); + return -EINVAL; + } + return count; +} +CONFIGFS_ATTR(tcmu_, qfull_time_out); + +static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb); +} +CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); + +static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item, + char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk); +} +CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk); + +static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%u\n", + (udev->cmdr_size + CMDR_OFF) >> 20); +} +CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb); + +static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); +} + +static int tcmu_send_dev_config_event(struct tcmu_dev *udev, + const char *reconfig_data) +{ + struct sk_buff *skb = NULL; + void *msg_header = NULL; + int ret = 0; + + ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, + &skb, &msg_header); + if (ret < 0) + return ret; + ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data); + if (ret < 0) { + nlmsg_free(skb); + return ret; + } + return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, + skb, msg_header); +} + + +static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + int ret, len; + + len = strlen(page); + if (!len || len > TCMU_CONFIG_LEN - 1) + return -EINVAL; + + /* Check if device has been configured before */ + if (target_dev_configured(&udev->se_dev)) { + ret = tcmu_send_dev_config_event(udev, page); + if (ret) { + pr_err("Unable to reconfigure device\n"); + return ret; + } + strscpy(udev->dev_config, page, TCMU_CONFIG_LEN); + + ret = tcmu_update_uio_info(udev); + if (ret) + return ret; + return count; + } + strscpy(udev->dev_config, page, TCMU_CONFIG_LEN); + + return count; +} +CONFIGFS_ATTR(tcmu_, dev_config); + +static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev 
*udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); +} + +static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) +{ + struct sk_buff *skb = NULL; + void *msg_header = NULL; + int ret = 0; + + ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, + &skb, &msg_header); + if (ret < 0) + return ret; + ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE, + size, TCMU_ATTR_PAD); + if (ret < 0) { + nlmsg_free(skb); + return ret; + } + return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, + skb, msg_header); +} + +static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + u64 val; + int ret; + + ret = kstrtou64(page, 0, &val); + if (ret < 0) + return ret; + + /* Check if device has been configured before */ + if (target_dev_configured(&udev->se_dev)) { + ret = tcmu_send_dev_size_event(udev, val); + if (ret) { + pr_err("Unable to reconfigure device\n"); + return ret; + } + } + udev->dev_size = val; + return count; +} +CONFIGFS_ATTR(tcmu_, dev_size); + +static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, + char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); +} + +static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + s8 val; + int ret; + + ret = kstrtos8(page, 0, &val); + if (ret < 0) + return ret; + + udev->nl_reply_supported = val; + return count; +} +CONFIGFS_ATTR(tcmu_, nl_reply_supported); + +static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, + char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + + return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); +} + +static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) +{ + struct sk_buff *skb = NULL; + void *msg_header = NULL; + int ret = 0; + + ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, + &skb, &msg_header); + if (ret < 0) + return ret; + ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val); + if (ret < 0) { + nlmsg_free(skb); + return ret; + } + return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, + skb, msg_header); +} + +static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + u8 val; + int ret; + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + + /* Check if device has been configured before */ + if (target_dev_configured(&udev->se_dev)) { + ret = tcmu_send_emulate_write_cache(udev, val); + if (ret) { + pr_err("Unable to reconfigure device\n"); + return ret; + } + } + + da->emulate_write_cache = val; + return count; +} +CONFIGFS_ATTR(tcmu_, emulate_write_cache); + +static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + 
struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%i\n", + test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); +} + +static ssize_t tcmu_tmr_notification_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + u8 val; + int ret; + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + if (val > 1) + return -EINVAL; + + if (val) + set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); + return count; +} +CONFIGFS_ATTR(tcmu_, tmr_notification); + +static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); +} + +static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + u8 val; + int ret; + + if (!target_dev_configured(&udev->se_dev)) { + pr_err("Device is not configured.\n"); + return -EINVAL; + } + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + + if (val > 1) { + pr_err("Invalid block value %d\n", val); + return -EINVAL; + } + + if (!val) + tcmu_unblock_dev(udev); + else + tcmu_block_dev(udev); + return count; +} +CONFIGFS_ATTR(tcmu_, block_dev); + +static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + u8 val; + int ret; + + if (!target_dev_configured(&udev->se_dev)) { + pr_err("Device is not configured.\n"); + return -EINVAL; + } + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + + if (val != 1 && val != 2) { + pr_err("Invalid reset ring value %d\n", val); + return -EINVAL; + } + + tcmu_reset_ring(udev, val); + return count; +} +CONFIGFS_ATTR_WO(tcmu_, reset_ring); + +static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + struct tcmu_cmd *cmd; + u16 cmd_id; + int ret; + + if (!target_dev_configured(&udev->se_dev)) { + pr_err("Device is not configured.\n"); + return -EINVAL; + } + + ret = kstrtou16(page, 0, &cmd_id); + if (ret < 0) + return ret; + + mutex_lock(&udev->cmdr_lock); + + { + XA_STATE(xas, &udev->commands, cmd_id); + + xas_lock(&xas); + cmd = xas_load(&xas); + if (!cmd) { + pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id); + count = -EINVAL; + xas_unlock(&xas); + goto out_unlock; + } + if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { + pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n", + cmd_id); + count = -EINVAL; + xas_unlock(&xas); + goto out_unlock; + } + xas_store(&xas, NULL); + xas_unlock(&xas); + } + + tcmu_cmd_free_data(cmd, cmd->dbi_cnt); + tcmu_free_cmd(cmd); + /* + * We only freed data space, not ring space. 
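+ * The ring entry itself was already reclaimed when the command's
+ * completion was handled.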
Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+ */
+ if (list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+out_unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
+
+static struct configfs_attribute *tcmu_attrib_attrs[] = {
+ &tcmu_attr_cmd_time_out,
+ &tcmu_attr_qfull_time_out,
+ &tcmu_attr_max_data_area_mb,
+ &tcmu_attr_data_pages_per_blk,
+ &tcmu_attr_cmd_ring_size_mb,
+ &tcmu_attr_dev_config,
+ &tcmu_attr_dev_size,
+ &tcmu_attr_emulate_write_cache,
+ &tcmu_attr_tmr_notification,
+ &tcmu_attr_nl_reply_supported,
+ NULL,
+};
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct configfs_attribute *tcmu_action_attrs[] = {
+ &tcmu_attr_block_dev,
+ &tcmu_attr_reset_ring,
+ &tcmu_attr_free_kept_buf,
+ NULL,
+};
+
+static struct target_backend_ops tcmu_ops = {
+ .name = "user",
+ .owner = THIS_MODULE,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
+ .attach_hba = tcmu_attach_hba,
+ .detach_hba = tcmu_detach_hba,
+ .alloc_device = tcmu_alloc_device,
+ .configure_device = tcmu_configure_device,
+ .destroy_device = tcmu_destroy_device,
+ .free_device = tcmu_free_device,
+ .unplug_device = tcmu_unplug_device,
+ .plug_device = tcmu_plug_device,
+ .parse_cdb = tcmu_parse_cdb,
+ .tmr_notify = tcmu_tmr_notify,
+ .set_configfs_dev_params = tcmu_set_configfs_dev_params,
+ .show_configfs_dev_params = tcmu_show_configfs_dev_params,
+ .get_device_type = sbc_get_device_type,
+ .get_blocks = tcmu_get_blocks,
+ .tb_dev_action_attrs = tcmu_action_attrs,
+};
+
+static void find_free_blocks(void)
+{
+ struct tcmu_dev *udev;
+ loff_t off;
+ u32 pages_freed, total_pages_freed = 0;
+ u32 start, end, block, total_blocks_freed = 0;
+
+ if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
+ return;
+
+ mutex_lock(&root_udev_mutex);
+ list_for_each_entry(udev, &root_udev, node) {
+ mutex_lock(&udev->cmdr_lock);
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
+ /* Try to complete the finished commands first */
+ if (tcmu_handle_completions(udev))
+ run_qfull_queue(udev, false);
+
+ /* Skip the udevs in idle */
+ if (!udev->dbi_thresh) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
+ end = udev->dbi_max + 1;
+ block = find_last_bit(udev->data_bitmap, end);
+ if (block == udev->dbi_max) {
+ /*
+ * The last bit is dbi_max, so it is not possible to
+ * reclaim any blocks.
+ */
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ } else if (block == end) {
+ /* The current udev will go to idle state */
+ udev->dbi_thresh = start = 0;
+ udev->dbi_max = 0;
+ } else {
+ udev->dbi_thresh = start = block + 1;
+ udev->dbi_max = block;
+ }
+
+ /*
+ * Release the block pages.
+ *
+ * Also note that since tcmu_vma_fault() gets an extra page
+ * refcount, tcmu_blocks_release() won't free pages if pages
+ * are mapped. This means it is safe to call
+ * tcmu_blocks_release() before unmap_mapping_range() which
+ * drops the refcount of any pages it unmaps and thus releases
+ * them.
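+ * The lock_page()/unlock_page() pair in tcmu_blocks_release() also
+ * waits for page faults still in flight on a page before that page
+ * is freed.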
+ */ + pages_freed = tcmu_blocks_release(udev, start, end - 1); + + /* Here will truncate the data area from off */ + off = udev->data_off + (loff_t)start * udev->data_blk_size; + unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); + + mutex_unlock(&udev->cmdr_lock); + + total_pages_freed += pages_freed; + total_blocks_freed += end - start; + pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n", + pages_freed, total_pages_freed, end - start, + total_blocks_freed, udev->name); + } + mutex_unlock(&root_udev_mutex); + + if (atomic_read(&global_page_count) > tcmu_global_max_pages) + schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000)); +} + +static void check_timedout_devices(void) +{ + struct tcmu_dev *udev, *tmp_dev; + struct tcmu_cmd *cmd, *tmp_cmd; + LIST_HEAD(devs); + + spin_lock_bh(&timed_out_udevs_lock); + list_splice_init(&timed_out_udevs, &devs); + + list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { + list_del_init(&udev->timedout_entry); + spin_unlock_bh(&timed_out_udevs_lock); + + mutex_lock(&udev->cmdr_lock); + + /* + * If cmd_time_out is disabled but qfull is set deadline + * will only reflect the qfull timeout. Ignore it. + */ + if (udev->cmd_time_out) { + list_for_each_entry_safe(cmd, tmp_cmd, + &udev->inflight_queue, + queue_entry) { + tcmu_check_expired_ring_cmd(cmd); + } + tcmu_set_next_deadline(&udev->inflight_queue, + &udev->cmd_timer); + } + list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, + queue_entry) { + tcmu_check_expired_queue_cmd(cmd); + } + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + + mutex_unlock(&udev->cmdr_lock); + + spin_lock_bh(&timed_out_udevs_lock); + } + + spin_unlock_bh(&timed_out_udevs_lock); +} + +static void tcmu_unmap_work_fn(struct work_struct *work) +{ + check_timedout_devices(); + find_free_blocks(); +} + +static int __init tcmu_module_init(void) +{ + int ret, i, k, len = 0; + + BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); + + INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); + + tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", + sizeof(struct tcmu_cmd), + __alignof__(struct tcmu_cmd), + 0, NULL); + if (!tcmu_cmd_cache) + return -ENOMEM; + + tcmu_root_device = root_device_register("tcm_user"); + if (IS_ERR(tcmu_root_device)) { + ret = PTR_ERR(tcmu_root_device); + goto out_free_cache; + } + + ret = genl_register_family(&tcmu_genl_family); + if (ret < 0) { + goto out_unreg_device; + } + + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) + len += sizeof(struct configfs_attribute *); + for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++) + len += sizeof(struct configfs_attribute *); + for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) + len += sizeof(struct configfs_attribute *); + len += sizeof(struct configfs_attribute *); + + tcmu_attrs = kzalloc(len, GFP_KERNEL); + if (!tcmu_attrs) { + ret = -ENOMEM; + goto out_unreg_genl; + } + + for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) + tcmu_attrs[i] = passthrough_attrib_attrs[i]; + for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++) + tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k]; + for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) + tcmu_attrs[i++] = tcmu_attrib_attrs[k]; + tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; + + ret = transport_backend_register(&tcmu_ops); + if (ret) + goto out_attrs; + + return 0; + +out_attrs: + kfree(tcmu_attrs); +out_unreg_genl: + genl_unregister_family(&tcmu_genl_family); +out_unreg_device: + root_device_unregister(tcmu_root_device); 
+out_free_cache: + kmem_cache_destroy(tcmu_cmd_cache); + + return ret; +} + +static void __exit tcmu_module_exit(void) +{ + cancel_delayed_work_sync(&tcmu_unmap_work); + target_backend_unregister(&tcmu_ops); + kfree(tcmu_attrs); + genl_unregister_family(&tcmu_genl_family); + root_device_unregister(tcmu_root_device); + kmem_cache_destroy(tcmu_cmd_cache); +} + +MODULE_DESCRIPTION("TCM USER subsystem plugin"); +MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); +MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); +MODULE_LICENSE("GPL"); + +module_init(tcmu_module_init); +module_exit(tcmu_module_exit); diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c new file mode 100644 index 0000000000..91ed015b58 --- /dev/null +++ b/drivers/target/target_core_xcopy.c @@ -0,0 +1,1040 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: target_core_xcopy.c + * + * This file contains support for SPC-4 Extended-Copy offload with generic + * TCM backends. + * + * Copyright (c) 2011-2013 Datera, Inc. All rights reserved. + * + * Author: + * Nicholas A. Bellinger <nab@daterainc.com> + * + ******************************************************************************/ + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/configfs.h> +#include <linux/ratelimit.h> +#include <scsi/scsi_proto.h> +#include <asm/unaligned.h> + +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" +#include "target_core_pr.h" +#include "target_core_ua.h" +#include "target_core_xcopy.h" + +static struct workqueue_struct *xcopy_wq = NULL; + +static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop); + +/** + * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers + * + * @se_dev: device being considered for match + * @dev_wwn: XCOPY requested NAA dev_wwn + * @return: 1 on match, 0 on no-match + */ +static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, + const unsigned char *dev_wwn) +{ + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + int rc; + + if (!se_dev->dev_attrib.emulate_3pc) { + pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); + return 0; + } + + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); + spc_gen_naa_6h_vendor_specific(se_dev, &tmp_dev_wwn[0]); + + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); + if (rc != 0) { + pr_debug("XCOPY: skip non-matching: %*ph\n", + XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); + return 0; + } + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); + + return 1; +} + +static int target_xcopy_locate_se_dev_e4(struct se_session *sess, + const unsigned char *dev_wwn, + struct se_device **_found_dev, + struct percpu_ref **_found_lun_ref) +{ + struct se_dev_entry *deve; + struct se_node_acl *nacl; + struct se_lun *this_lun = NULL; + struct se_device *found_dev = NULL; + + /* cmd with NULL sess indicates no associated $FABRIC_MOD */ + if (!sess) + goto err_out; + + pr_debug("XCOPY 0xe4: searching for: %*ph\n", + XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); + + nacl = sess->se_node_acl; + rcu_read_lock(); + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { + struct se_device *this_dev; + int rc; + + this_lun = deve->se_lun; + this_dev = rcu_dereference_raw(this_lun->lun_se_dev); + + rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); + if (rc) { 
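+ /* Pin the matched LUN before leaving the RCU read-side section, so the device cannot be released while the EXTENDED_COPY is in flight; the reference is dropped later via xcopy_pt_undepend_remotedev(). */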
+ if (percpu_ref_tryget_live(&this_lun->lun_ref)) + found_dev = this_dev; + break; + } + } + rcu_read_unlock(); + if (found_dev == NULL) + goto err_out; + + pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", + found_dev, &found_dev->dev_group); + *_found_dev = found_dev; + *_found_lun_ref = &this_lun->lun_ref; + return 0; +err_out: + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; +} + +static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, + unsigned char *p, unsigned short cscd_index) +{ + unsigned char *desc = p; + unsigned short ript; + u8 desig_len; + /* + * Extract RELATIVE INITIATOR PORT IDENTIFIER + */ + ript = get_unaligned_be16(&desc[2]); + pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript); + /* + * Check for supported code set, association, and designator type + */ + if ((desc[4] & 0x0f) != 0x1) { + pr_err("XCOPY 0xe4: code set of non binary type not supported\n"); + return -EINVAL; + } + if ((desc[5] & 0x30) != 0x00) { + pr_err("XCOPY 0xe4: association other than LUN not supported\n"); + return -EINVAL; + } + if ((desc[5] & 0x0f) != 0x3) { + pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n", + (desc[5] & 0x0f)); + return -EINVAL; + } + /* + * Check for matching 16 byte length for NAA IEEE Registered Extended + * Assigned designator + */ + desig_len = desc[7]; + if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) { + pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len); + return -EINVAL; + } + pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len); + /* + * Check for NAA IEEE Registered Extended Assigned header.. + */ + if ((desc[8] & 0xf0) != 0x60) { + pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n", + (desc[8] & 0xf0)); + return -EINVAL; + } + + if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { + pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " + "dest\n", cscd_index); + return 0; + } + + if (cscd_index == xop->stdi) { + memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* + * Determine if the source designator matches the local device + */ + if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0], + XCOPY_NAA_IEEE_REGEX_LEN)) { + xop->op_origin = XCOL_SOURCE_RECV_OP; + xop->src_dev = se_cmd->se_dev; + pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" + " received xop\n", xop->src_dev); + } + } + + if (cscd_index == xop->dtdi) { + memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* + * Determine if the destination designator matches the local + * device. If @cscd_index corresponds to both source (stdi) and + * destination (dtdi), or dtdi comes after stdi, then + * XCOL_DEST_RECV_OP wins. 
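+ * (The dtdi block below executes after the stdi block above and simply + * overwrites op_origin, which is what makes the destination win.)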
+ */ + if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], + XCOPY_NAA_IEEE_REGEX_LEN)) { + xop->op_origin = XCOL_DEST_RECV_OP; + xop->dst_dev = se_cmd->se_dev; + pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination" + " received xop\n", xop->dst_dev); + } + } + + return 0; +} + +static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + struct xcopy_op *xop, unsigned char *p, + unsigned short tdll, sense_reason_t *sense_ret) +{ + struct se_device *local_dev = se_cmd->se_dev; + unsigned char *desc = p; + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; + unsigned short cscd_index = 0; + unsigned short start = 0; + + *sense_ret = TCM_INVALID_PARAMETER_LIST; + + if (offset != 0) { + pr_err("XCOPY target descriptor list length is not" + " a multiple of %d\n", XCOPY_TARGET_DESC_LEN); + *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE; + return -EINVAL; + } + if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) { + pr_err("XCOPY target descriptor supports a maximum of" + " two src/dest descriptors, tdll: %hu too large\n", tdll); + /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */ + *sense_ret = TCM_TOO_MANY_TARGET_DESCS; + return -EINVAL; + } + /* + * Generate an IEEE Registered Extended designator based upon the + * se_device the XCOPY was received upon. + */ + memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); + spc_gen_naa_6h_vendor_specific(local_dev, &xop->local_dev_wwn[0]); + + while (start < tdll) { + /* + * Check for target descriptor identification type 0xE4, and + * compare the current index with the CSCD descriptor IDs in + * the segment descriptor. Use VPD 0x83 WWPN matching. + */ + switch (desc[0]) { + case 0xe4: + rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, + &desc[0], cscd_index); + if (rc != 0) + goto out; + start += XCOPY_TARGET_DESC_LEN; + desc += XCOPY_TARGET_DESC_LEN; + cscd_index++; + break; + default: + pr_err("XCOPY unsupported descriptor type code:" + " 0x%02x\n", desc[0]); + *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE; + goto out; + } + } + + switch (xop->op_origin) { + case XCOL_SOURCE_RECV_OP: + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, + xop->dst_tid_wwn, + &xop->dst_dev, + &xop->remote_lun_ref); + break; + case XCOL_DEST_RECV_OP: + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, + xop->src_tid_wwn, + &xop->src_dev, + &xop->remote_lun_ref); + break; + default: + pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " + "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); + rc = -EINVAL; + break; + } + /* + * If a matching IEEE NAA 0x83 descriptor for the requested device + * is not located on this node, return COPY_ABORTED with ASC/ASCQ + * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the + * initiator to fall back to normal copy method.
+ */ + if (rc < 0) { + *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE; + goto out; + } + + pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", + xop->src_dev, &xop->src_tid_wwn[0]); + pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", + xop->dst_dev, &xop->dst_tid_wwn[0]); + + return cscd_index; + +out: + return -EINVAL; +} + +static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p) +{ + unsigned char *desc = p; + int dc = (desc[1] & 0x02); + unsigned short desc_len; + + desc_len = get_unaligned_be16(&desc[2]); + if (desc_len != 0x18) { + pr_err("XCOPY segment desc 0x02: Illegal desc_len:" + " %hu\n", desc_len); + return -EINVAL; + } + + xop->stdi = get_unaligned_be16(&desc[4]); + xop->dtdi = get_unaligned_be16(&desc[6]); + + if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX || + xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) { + pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n", + XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi); + return -EINVAL; + } + + pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", + desc_len, xop->stdi, xop->dtdi, dc); + + xop->nolb = get_unaligned_be16(&desc[10]); + xop->src_lba = get_unaligned_be64(&desc[12]); + xop->dst_lba = get_unaligned_be64(&desc[20]); + pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n", + xop->nolb, (unsigned long long)xop->src_lba, + (unsigned long long)xop->dst_lba); + + return 0; +} + +static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop, + unsigned char *p, unsigned int sdll, + sense_reason_t *sense_ret) +{ + unsigned char *desc = p; + unsigned int start = 0; + int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; + + *sense_ret = TCM_INVALID_PARAMETER_LIST; + + if (offset != 0) { + pr_err("XCOPY segment descriptor list length is not" + " a multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); + *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE; + return -EINVAL; + } + if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { + pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too" + " large\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); + /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ + *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; + return -EINVAL; + } + + while (start < sdll) { + /* + * Check segment descriptor type code for block -> block + */ + switch (desc[0]) { + case 0x02: + rc = target_xcopy_parse_segdesc_02(xop, desc); + if (rc < 0) + goto out; + + ret++; + start += XCOPY_SEGMENT_DESC_LEN; + desc += XCOPY_SEGMENT_DESC_LEN; + break; + default: + pr_err("XCOPY unsupported segment descriptor" + " type: 0x%02x\n", desc[0]); + *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE; + goto out; + } + } + + return ret; + +out: + return -EINVAL; +} + +/* + * Start xcopy_pt ops + */ + +struct xcopy_pt_cmd { + struct se_cmd se_cmd; + struct completion xpt_passthrough_sem; + unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; +}; + +static struct se_portal_group xcopy_pt_tpg; +static struct se_session xcopy_pt_sess; +static struct se_node_acl xcopy_pt_nacl; + +static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) +{ + if (xop->op_origin == XCOL_SOURCE_RECV_OP) + pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); + else + pr_debug("putting src lun_ref for %p\n", xop->src_dev); + + percpu_ref_put(xop->remote_lun_ref); +} + +static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) +{ + struct
xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, + struct xcopy_pt_cmd, se_cmd); + + /* xpt_cmd is on the stack, nothing to free here */ + pr_debug("xpt_cmd done: %p\n", xpt_cmd); +} + +static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd) +{ + struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, + struct xcopy_pt_cmd, se_cmd); + + complete(&xpt_cmd->xpt_passthrough_sem); + return 0; +} + +static int xcopy_pt_write_pending(struct se_cmd *se_cmd) +{ + return 0; +} + +static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd) +{ + return 0; +} + +static int xcopy_pt_queue_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static const struct target_core_fabric_ops xcopy_pt_tfo = { + .fabric_name = "xcopy-pt", + .get_cmd_state = xcopy_pt_get_cmd_state, + .release_cmd = xcopy_pt_release_cmd, + .check_stop_free = xcopy_pt_check_stop_free, + .write_pending = xcopy_pt_write_pending, + .queue_data_in = xcopy_pt_queue_data_in, + .queue_status = xcopy_pt_queue_status, +}; + +/* + * End xcopy_pt_ops + */ + +int target_xcopy_setup_pt(void) +{ + xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); + if (!xcopy_wq) { + pr_err("Unable to allocate xcopy_wq\n"); + return -ENOMEM; + } + + memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group)); + INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list); + INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list); + + xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo; + + memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl)); + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); + INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); + memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); + transport_init_session(&xcopy_pt_sess); + + xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; + xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; + + xcopy_pt_sess.se_tpg = &xcopy_pt_tpg; + xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl; + + return 0; +} + +void target_xcopy_release_pt(void) +{ + if (xcopy_wq) + destroy_workqueue(xcopy_wq); +} + +/* + * target_xcopy_setup_pt_cmd - set up a pass-through command + * @xpt_cmd: Data structure to initialize. + * @xop: Describes the XCOPY operation received from an initiator. + * @se_dev: Backend device to associate with @xpt_cmd if + * @remote_port == true. + * @cdb: SCSI CDB to be copied into @xpt_cmd. + * @remote_port: If false, use the LUN through which the XCOPY command has + * been received. If true, use @se_dev->xcopy_lun. + * + * Set up a SCSI command (READ or WRITE) that will be used to execute an + * XCOPY command. + */ +static int target_xcopy_setup_pt_cmd( + struct xcopy_pt_cmd *xpt_cmd, + struct xcopy_op *xop, + struct se_device *se_dev, + unsigned char *cdb, + bool remote_port) +{ + struct se_cmd *cmd = &xpt_cmd->se_cmd; + + /* + * Setup LUN+port to honor reservations based upon xop->op_origin for + * X-COPY PUSH or X-COPY PULL based upon where the CDB was received. 
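+ * When remote_port is true the command is issued against the peer + * device's xcopy_lun; otherwise it reuses the LUN and device the + * EXTENDED_COPY itself arrived on.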
+ */ + if (remote_port) { + cmd->se_lun = &se_dev->xcopy_lun; + cmd->se_dev = se_dev; + } else { + cmd->se_lun = xop->xop_se_cmd->se_lun; + cmd->se_dev = xop->xop_se_cmd->se_dev; + } + cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL)) + return -EINVAL; + + cmd->tag = 0; + if (target_cmd_parse_cdb(cmd)) + return -EINVAL; + + if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg, + xop->xop_data_nents, NULL, 0)) + return -EINVAL; + + pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:" + " %u\n", cmd->t_data_sg, cmd->t_data_nents); + + return 0; +} + +static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd) +{ + struct se_cmd *se_cmd = &xpt_cmd->se_cmd; + sense_reason_t sense_rc; + + sense_rc = transport_generic_new_cmd(se_cmd); + if (sense_rc) + return -EINVAL; + + if (se_cmd->data_direction == DMA_TO_DEVICE) + target_execute_cmd(se_cmd); + + wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem); + + pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", + se_cmd->scsi_status); + + return (se_cmd->scsi_status) ? -EINVAL : 0; +} + +static int target_xcopy_read_source( + struct se_cmd *ec_cmd, + struct xcopy_op *xop, + struct se_device *src_dev, + sector_t src_lba, + u32 src_bytes) +{ + struct xcopy_pt_cmd xpt_cmd; + struct se_cmd *se_cmd = &xpt_cmd.se_cmd; + u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size; + int rc; + unsigned char cdb[16]; + bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP); + + memset(&xpt_cmd, 0, sizeof(xpt_cmd)); + init_completion(&xpt_cmd.xpt_passthrough_sem); + + memset(&cdb[0], 0, 16); + cdb[0] = READ_16; + put_unaligned_be64(src_lba, &cdb[2]); + put_unaligned_be32(transfer_length_block, &cdb[10]); + pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n", + (unsigned long long)src_lba, transfer_length_block, src_bytes); + + __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes, + DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0, + NULL); + rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0], + remote_port); + if (rc < 0) { + ec_cmd->scsi_status = se_cmd->scsi_status; + goto out; + } + + pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ" + " memory\n", xop->xop_data_sg, xop->xop_data_nents); + + rc = target_xcopy_issue_pt_cmd(&xpt_cmd); + if (rc < 0) + ec_cmd->scsi_status = se_cmd->scsi_status; +out: + transport_generic_free_cmd(se_cmd, 0); + return rc; +} + +static int target_xcopy_write_destination( + struct se_cmd *ec_cmd, + struct xcopy_op *xop, + struct se_device *dst_dev, + sector_t dst_lba, + u32 dst_bytes) +{ + struct xcopy_pt_cmd xpt_cmd; + struct se_cmd *se_cmd = &xpt_cmd.se_cmd; + u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size; + int rc; + unsigned char cdb[16]; + bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP); + + memset(&xpt_cmd, 0, sizeof(xpt_cmd)); + init_completion(&xpt_cmd.xpt_passthrough_sem); + + memset(&cdb[0], 0, 16); + cdb[0] = WRITE_16; + put_unaligned_be64(dst_lba, &cdb[2]); + put_unaligned_be32(transfer_length_block, &cdb[10]); + pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n", + (unsigned long long)dst_lba, transfer_length_block, dst_bytes); + + __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes, + DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0, + NULL); + rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0], + remote_port); + if (rc < 0) { + ec_cmd->scsi_status = se_cmd->scsi_status; + goto out; + } 
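+ /* Dispatch the WRITE_16 to the backend; target_xcopy_issue_pt_cmd() waits on xpt_passthrough_sem for completion before returning. */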
+ + rc = target_xcopy_issue_pt_cmd(&xpt_cmd); + if (rc < 0) + ec_cmd->scsi_status = se_cmd->scsi_status; +out: + transport_generic_free_cmd(se_cmd, 0); + return rc; +} + +static void target_xcopy_do_work(struct work_struct *work) +{ + struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work); + struct se_cmd *ec_cmd = xop->xop_se_cmd; + struct se_device *src_dev, *dst_dev; + sector_t src_lba, dst_lba, end_lba; + unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks; + int rc = 0; + unsigned short nolb; + unsigned int copied_bytes = 0; + sense_reason_t sense_rc; + + sense_rc = target_parse_xcopy_cmd(xop); + if (sense_rc != TCM_NO_SENSE) + goto err_free; + + if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) { + sense_rc = TCM_INVALID_PARAMETER_LIST; + goto err_free; + } + + src_dev = xop->src_dev; + dst_dev = xop->dst_dev; + src_lba = xop->src_lba; + dst_lba = xop->dst_lba; + nolb = xop->nolb; + end_lba = src_lba + nolb; + /* + * Break up XCOPY I/O into hw_max_sectors * hw_block_size sized + * I/O based on the smallest max_bytes between src_dev + dst_dev + */ + max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors * + src_dev->dev_attrib.hw_block_size; + max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors * + dst_dev->dev_attrib.hw_block_size; + + max_bytes = min_t(u64, max_bytes_src, max_bytes_dst); + max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES); + + /* + * Using shift instead of the division because otherwise GCC + * generates __udivdi3 that is missing on i386 + */ + max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size); + + pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__, + nolb, max_blocks, (unsigned long long)end_lba); + pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__, + (unsigned long long)src_lba, (unsigned long long)dst_lba); + + while (nolb) { + u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size); + unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size; + + if (cur_bytes != xop->xop_data_bytes) { + /* + * (Re)allocate a buffer large enough to hold the XCOPY + * I/O size, which can be reused each read / write loop. 
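+ * In practice this allocates once up front and at most once more for + * a short final chunk, since cur_bytes only changes on the last loop.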
+ */ + target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); + rc = target_alloc_sgl(&xop->xop_data_sg, + &xop->xop_data_nents, + cur_bytes, + false, false); + if (rc < 0) + goto out; + xop->xop_data_bytes = cur_bytes; + } + + pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n", + __func__, src_dev, (unsigned long long)src_lba, cur_nolb); + + rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes); + if (rc < 0) + goto out; + + src_lba += cur_bytes / src_dev->dev_attrib.block_size; + pr_debug("%s: Incremented READ src_lba to %llu\n", __func__, + (unsigned long long)src_lba); + + pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n", + __func__, dst_dev, (unsigned long long)dst_lba, cur_nolb); + + rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev, + dst_lba, cur_bytes); + if (rc < 0) + goto out; + + dst_lba += cur_bytes / dst_dev->dev_attrib.block_size; + pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__, + (unsigned long long)dst_lba); + + copied_bytes += cur_bytes; + nolb -= cur_bytes / src_dev->dev_attrib.block_size; + } + + xcopy_pt_undepend_remotedev(xop); + target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); + kfree(xop); + + pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__, + (unsigned long long)src_lba, (unsigned long long)dst_lba); + pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__, + copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes); + + pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__); + target_complete_cmd(ec_cmd, SAM_STAT_GOOD); + return; + +out: + /* + * The XCOPY command was aborted after some data was transferred. + * Terminate command with CHECK CONDITION status, with the sense key + * set to COPY ABORTED. + */ + sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE; + xcopy_pt_undepend_remotedev(xop); + target_free_sgl(xop->xop_data_sg, xop->xop_data_nents); + +err_free: + kfree(xop); + pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n", + __func__, rc, sense_rc); + target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc); +} + +/* + * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing + * fails. 
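+ * + * LID1 parameter list header layout as consumed below: byte 0 is the + * LIST IDENTIFIER, byte 1 bits 4:3 the LIST ID USAGE, bytes 2-3 the + * TARGET DESCRIPTOR LIST LENGTH, bytes 8-11 the SEGMENT DESCRIPTOR LIST + * LENGTH and bytes 12-15 the INLINE DATA LENGTH; the descriptors + * themselves start at byte 16.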
+ */ +static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop) +{ + struct se_cmd *se_cmd = xop->xop_se_cmd; + unsigned char *p = NULL, *seg_desc; + unsigned int list_id, list_id_usage, sdll, inline_dl; + sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; + int rc; + unsigned short tdll; + + p = transport_kmap_data_sg(se_cmd); + if (!p) { + pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); + return TCM_OUT_OF_RESOURCES; + } + + list_id = p[0]; + list_id_usage = (p[1] & 0x18) >> 3; + + /* + * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH + */ + tdll = get_unaligned_be16(&p[2]); + sdll = get_unaligned_be32(&p[8]); + if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) { + pr_err("XCOPY descriptor list length %u exceeds maximum %u\n", + tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN); + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } + + inline_dl = get_unaligned_be32(&p[12]); + if (inline_dl != 0) { + pr_err("XCOPY with non zero inline data length\n"); + goto out; + } + + if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) { + pr_err("XCOPY parameter truncation: data length %u too small " + "for tdll: %hu sdll: %u inline_dl: %u\n", + se_cmd->data_length, tdll, sdll, inline_dl); + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } + + pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" + " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, + tdll, sdll, inline_dl); + + /* + * skip over the target descriptors until segment descriptors + * have been passed - CSCD ids are needed to determine src and dest. + */ + seg_desc = &p[16] + tdll; + + rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret); + if (rc <= 0) + goto out; + + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, + rc * XCOPY_SEGMENT_DESC_LEN); + + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); + if (rc <= 0) + goto out; + + if (xop->src_dev->dev_attrib.block_size != + xop->dst_dev->dev_attrib.block_size) { + pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev" + " block_size: %u currently unsupported\n", + xop->src_dev->dev_attrib.block_size, + xop->dst_dev->dev_attrib.block_size); + xcopy_pt_undepend_remotedev(xop); + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto out; + } + + pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, + rc * XCOPY_TARGET_DESC_LEN); + transport_kunmap_data_sg(se_cmd); + return TCM_NO_SENSE; + +out: + if (p) + transport_kunmap_data_sg(se_cmd); + return ret; +} + +sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) +{ + struct se_device *dev = se_cmd->se_dev; + struct xcopy_op *xop; + unsigned int sa; + + if (!dev->dev_attrib.emulate_3pc) { + pr_err("EXTENDED_COPY operation explicitly disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + sa = se_cmd->t_task_cdb[1] & 0x1f; + if (sa != 0x00) { + pr_err("EXTENDED_COPY(LID4) not supported\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (se_cmd->data_length == 0) { + target_complete_cmd(se_cmd, SAM_STAT_GOOD); + return TCM_NO_SENSE; + } + if (se_cmd->data_length < XCOPY_HDR_LEN) { + pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", + se_cmd->data_length, XCOPY_HDR_LEN); + return TCM_PARAMETER_LIST_LENGTH_ERROR; + } + + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); + if (!xop) + goto err; + xop->xop_se_cmd = se_cmd; + INIT_WORK(&xop->xop_work, target_xcopy_do_work); + if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work))) + goto free; + return 
TCM_NO_SENSE; + +free: + kfree(xop); + +err: + return TCM_OUT_OF_RESOURCES; +} + +static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) +{ + unsigned char *p; + + p = transport_kmap_data_sg(se_cmd); + if (!p) { + pr_err("transport_kmap_data_sg failed in" + " target_rcr_operating_parameters\n"); + return TCM_OUT_OF_RESOURCES; + } + + if (se_cmd->data_length < 54) { + pr_err("Receive Copy Results Op Parameters length" + " too small: %u\n", se_cmd->data_length); + transport_kunmap_data_sg(se_cmd); + return TCM_INVALID_CDB_FIELD; + } + /* + * Set SNLID=1 (Supports no List ID) + */ + p[4] = 0x1; + /* + * MAXIMUM TARGET DESCRIPTOR COUNT + */ + put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]); + /* + * MAXIMUM SEGMENT DESCRIPTOR COUNT + */ + put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]); + /* + * MAXIMUM DESCRIPTOR LIST LENGTH + */ + put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]); + /* + * MAXIMUM SEGMENT LENGTH + */ + put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]); + /* + * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED) + */ + put_unaligned_be32(0x0, &p[20]); + /* + * HELD DATA LIMIT + */ + put_unaligned_be32(0x0, &p[24]); + /* + * MAXIMUM STREAM DEVICE TRANSFER SIZE + */ + put_unaligned_be32(0x0, &p[28]); + /* + * TOTAL CONCURRENT COPIES + */ + put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]); + /* + * MAXIMUM CONCURRENT COPIES + */ + p[36] = RCR_OP_MAX_CONCURR_COPIES; + /* + * DATA SEGMENT GRANULARITY (log 2) + */ + p[37] = RCR_OP_DATA_SEG_GRAN_LOG2; + /* + * INLINE DATA GRANULARITY (log 2) + */ + p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2; + /* + * HELD DATA GRANULARITY + */ + p[39] = RCR_OP_HELD_DATA_GRAN_LOG2; + /* + * IMPLEMENTED DESCRIPTOR LIST LENGTH + */ + p[43] = 0x2; + /* + * List of implemented descriptor type codes (ordered) + */ + p[44] = 0x02; /* Copy Block to Block device */ + p[45] = 0xe4; /* Identification descriptor target descriptor */ + + /* + * AVAILABLE DATA (n-3) + */ + put_unaligned_be32(42, &p[0]); + + transport_kunmap_data_sg(se_cmd); + target_complete_cmd(se_cmd, SAM_STAT_GOOD); + + return TCM_NO_SENSE; +} + +sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd) +{ + unsigned char *cdb = &se_cmd->t_task_cdb[0]; + int sa = (cdb[1] & 0x1f), list_id = cdb[2]; + struct se_device *dev = se_cmd->se_dev; + sense_reason_t rc = TCM_NO_SENSE; + + if (!dev->dev_attrib.emulate_3pc) { + pr_debug("Third-party copy operations explicitly disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:" + " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length); + + if (list_id != 0) { + pr_err("Receive Copy Results with non-zero list identifier" + " not supported\n"); + return TCM_INVALID_CDB_FIELD; + } + + switch (sa) { + case RCR_SA_OPERATING_PARAMETERS: + rc = target_rcr_operating_parameters(se_cmd); + break; + case RCR_SA_COPY_STATUS: + case RCR_SA_RECEIVE_DATA: + case RCR_SA_FAILED_SEGMENT_DETAILS: + default: + pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa); + return TCM_INVALID_CDB_FIELD; + } + + return rc; +} diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h new file mode 100644 index 0000000000..0aad7dc658 --- /dev/null +++ b/drivers/target/target_core_xcopy.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <target/target_core_base.h> + +#define XCOPY_HDR_LEN 16 +#define XCOPY_TARGET_DESC_LEN 32 +#define XCOPY_SEGMENT_DESC_LEN 28 +#define XCOPY_NAA_IEEE_REGEX_LEN 16
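+/* Upper bound on the data moved per read/write round trip in target_xcopy_do_work(). */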
+#define XCOPY_MAX_BYTES 16777216 /* 16 MB */ + +/* + * SPC4r37 6.4.6.1 + * Table 150 — CSCD descriptor ID values + */ +#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF + +enum xcopy_origin_list { + XCOL_SOURCE_RECV_OP = 0x01, + XCOL_DEST_RECV_OP = 0x02, +}; + +struct xcopy_op { + int op_origin; + + struct se_cmd *xop_se_cmd; + struct se_device *src_dev; + unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + struct se_device *dst_dev; + unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + struct percpu_ref *remote_lun_ref; + + sector_t src_lba; + sector_t dst_lba; + unsigned short stdi; + unsigned short dtdi; + unsigned short nolb; + + u32 xop_data_bytes; + u32 xop_data_nents; + struct scatterlist *xop_data_sg; + struct work_struct xop_work; +}; + +/* + * Receive Copy Results Service Actions + */ +#define RCR_SA_COPY_STATUS 0x00 +#define RCR_SA_RECEIVE_DATA 0x01 +#define RCR_SA_OPERATING_PARAMETERS 0x03 +#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04 + +/* + * Receive Copy Results defs for Operating Parameters + */ +#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2 +#define RCR_OP_MAX_SG_DESC_COUNT 0x1 +#define RCR_OP_MAX_DESC_LIST_LEN 1024 +#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */ +#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */ +#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */ +#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */ +#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */ +#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */ + +extern int target_xcopy_setup_pt(void); +extern void target_xcopy_release_pt(void); +extern sense_reason_t target_do_xcopy(struct se_cmd *); +extern sense_reason_t target_do_receive_copy_results(struct se_cmd *); diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig new file mode 100644 index 0000000000..4f3b926b6a --- /dev/null +++ b/drivers/target/tcm_fc/Kconfig @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +config TCM_FC + tristate "TCM_FC fabric Plugin" + depends on LIBFC + help + Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile new file mode 100644 index 0000000000..a7d1593ab5 --- /dev/null +++ b/drivers/target/tcm_fc/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +tcm_fc-y += tfc_cmd.o \ + tfc_conf.o \ + tfc_io.o \ + tfc_sess.o + +obj-$(CONFIG_TCM_FC) += tcm_fc.o diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h new file mode 100644 index 0000000000..00e5573c62 --- /dev/null +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010 Cisco Systems, Inc. + */ +#ifndef __TCM_FC_H__ +#define __TCM_FC_H__ + +#include <linux/types.h> +#include <target/target_core_base.h> + +#define FT_VERSION "0.4" + +#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ +#define FT_TPG_NAMELEN 32 /* max length of TPG name */ +#define FT_LUN_NAMELEN 32 /* max length of LUN name */ +#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */ + +struct ft_transport_id { + __u8 format; + __u8 __resvd1[7]; + __u8 wwpn[8]; + __u8 __resvd2[8]; +} __attribute__((__packed__)); + +/* + * Session (remote port).
+ */ +struct ft_sess { + u32 port_id; /* for hash lookup use only */ + u32 params; + u16 max_frame; /* maximum frame size */ + u64 port_name; /* port name for transport ID */ + struct ft_tport *tport; + struct se_session *se_sess; + struct hlist_node hash; /* linkage in ft_sess_hash table */ + struct rcu_head rcu; + struct kref kref; /* ref for hash and outstanding I/Os */ +}; + +/* + * Hash table of sessions per local port. + * Hash lookup by remote port FC_ID. + */ +#define FT_SESS_HASH_BITS 6 +#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS) + +/* + * Per local port data. + * This is created only after a TPG exists that allows target function + * for the local port. If the TPG exists, this is allocated when + * we're notified that the local port has been created, or when + * the first PRLI provider callback is received. + */ +struct ft_tport { + struct fc_lport *lport; + struct ft_tpg *tpg; /* NULL if TPG deleted before tport */ + u32 sess_count; /* number of sessions in hash */ + struct rcu_head rcu; + struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */ +}; + +/* + * Node ID and authentication. + */ +struct ft_node_auth { + u64 port_name; + u64 node_name; +}; + +/* + * Node ACL for FC remote port session. + */ +struct ft_node_acl { + struct se_node_acl se_node_acl; + struct ft_node_auth node_auth; +}; + +struct ft_lun { + u32 index; + char name[FT_LUN_NAMELEN]; +}; + +/* + * Target portal group (local port). + */ +struct ft_tpg { + u32 index; + struct ft_lport_wwn *lport_wwn; + struct ft_tport *tport; /* active tport or NULL */ + struct list_head lun_list; /* head of LUNs */ + struct se_portal_group se_tpg; + struct workqueue_struct *workqueue; +}; + +struct ft_lport_wwn { + u64 wwpn; + char name[FT_NAMELEN]; + struct list_head ft_wwn_node; + struct ft_tpg *tpg; + struct se_wwn se_wwn; +}; + +/* + * Commands + */ +struct ft_cmd { + struct ft_sess *sess; /* session held for cmd */ + struct fc_seq *seq; /* sequence in exchange mgr */ + struct se_cmd se_cmd; /* Local TCM I/O descriptor */ + struct fc_frame *req_frame; + u32 write_data_len; /* data received on writes */ + struct work_struct work; + /* Local sense buffer */ + unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; + u32 was_ddp_setup:1; /* Set only if ddp is setup */ + u32 aborted:1; /* Set if aborted by reset or timeout */ + struct scatterlist *sg; /* Set only if DDP is setup */ + u32 sg_cnt; /* No. of item in scatterlist */ +}; + +extern struct mutex ft_lport_lock; +extern struct fc4_prov ft_prov; +extern unsigned int ft_debug_logging; + +/* + * Fabric methods. + */ + +/* + * Session ops. + */ +void ft_sess_put(struct ft_sess *); +void ft_sess_close(struct se_session *); +u32 ft_sess_get_index(struct se_session *); +u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); + +void ft_lport_add(struct fc_lport *, void *); +void ft_lport_del(struct fc_lport *, void *); +int ft_lport_notify(struct notifier_block *, unsigned long, void *); + +/* + * IO methods. + */ +int ft_check_stop_free(struct se_cmd *); +void ft_release_cmd(struct se_cmd *); +int ft_queue_status(struct se_cmd *); +int ft_queue_data_in(struct se_cmd *); +int ft_write_pending(struct se_cmd *); +void ft_queue_tm_resp(struct se_cmd *); +void ft_aborted_task(struct se_cmd *); + +/* + * other internal functions. 
+ */ +void ft_recv_req(struct ft_sess *, struct fc_frame *); +struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); + +void ft_recv_write_data(struct ft_cmd *, struct fc_frame *); +void ft_dump_cmd(struct ft_cmd *, const char *caller); + +ssize_t ft_format_wwn(char *, size_t, u64); + +/* + * Underlying HW specific helper function + */ +void ft_invl_hw_context(struct ft_cmd *); + +#endif /* __TCM_FC_H__ */ diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c new file mode 100644 index 0000000000..21783cd71c --- /dev/null +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2010 Cisco Systems, Inc. + */ + +/* XXX TBD some includes may be extraneous */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/hash.h> +#include <asm/unaligned.h> +#include <scsi/scsi_tcq.h> +#include <scsi/libfc.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_fc.h" + +/* + * Dump cmd state for debugging. + */ +static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +{ + struct fc_exch *ep; + struct fc_seq *sp; + struct se_cmd *se_cmd; + struct scatterlist *sg; + int count; + + se_cmd = &cmd->se_cmd; + pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", + caller, cmd, cmd->sess, cmd->seq, se_cmd); + + pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", + caller, cmd, se_cmd->t_data_nents, + se_cmd->data_length, se_cmd->se_cmd_flags); + + for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) + pr_debug("%s: cmd %p sg %p page %p " + "len 0x%x off 0x%x\n", + caller, cmd, sg, + sg_page(sg), sg->length, sg->offset); + + sp = cmd->seq; + if (sp) { + ep = fc_seq_exch(sp); + pr_debug("%s: cmd %p sid %x did %x " + "ox_id %x rx_id %x seq_id %x e_stat %x\n", + caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, + sp->id, ep->esb_stat); + } +} + +void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +{ + if (unlikely(ft_debug_logging)) + _ft_dump_cmd(cmd, caller); +} + +static void ft_free_cmd(struct ft_cmd *cmd) +{ + struct fc_frame *fp; + struct ft_sess *sess; + + if (!cmd) + return; + sess = cmd->sess; + fp = cmd->req_frame; + if (fr_seq(fp)) + fc_seq_release(fr_seq(fp)); + fc_frame_free(fp); + target_free_tag(sess->se_sess, &cmd->se_cmd); + ft_sess_put(sess); /* undo get from lookup at recv */ +} + +void ft_release_cmd(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + + ft_free_cmd(cmd); +} + +int ft_check_stop_free(struct se_cmd *se_cmd) +{ + return transport_generic_free_cmd(se_cmd, 0); +} + +/* + * Send response. 
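+ * Builds an FCP_RSP frame carrying the SCSI status, any sense data and + * residual counts, then completes the exchange.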
+ */ +int ft_queue_status(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct fc_frame *fp; + struct fcp_resp_with_ext *fcp; + struct fc_lport *lport; + struct fc_exch *ep; + size_t len; + int rc; + + if (cmd->aborted) + return 0; + ft_dump_cmd(cmd, __func__); + ep = fc_seq_exch(cmd->seq); + lport = ep->lp; + len = sizeof(*fcp) + se_cmd->scsi_sense_length; + fp = fc_frame_alloc(lport, len); + if (!fp) { + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + return -ENOMEM; + } + + fcp = fc_frame_payload_get(fp, len); + memset(fcp, 0, len); + fcp->resp.fr_status = se_cmd->scsi_status; + + len = se_cmd->scsi_sense_length; + if (len) { + fcp->resp.fr_flags |= FCP_SNS_LEN_VAL; + fcp->ext.fr_sns_len = htonl(len); + memcpy((fcp + 1), se_cmd->sense_buffer, len); + } + + /* + * Test underflow and overflow with one mask. Usually both are off. + * Bidirectional commands are not handled yet. + */ + if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) { + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) + fcp->resp.fr_flags |= FCP_RESID_OVER; + else + fcp->resp.fr_flags |= FCP_RESID_UNDER; + fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count); + } + + /* + * Send response. + */ + cmd->seq = fc_seq_start_next(cmd->seq); + fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, + FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); + + rc = fc_seq_send(lport, cmd->seq, fp); + if (rc) { + pr_info_ratelimited("%s: Failed to send response frame %p, " + "xid <0x%x>\n", __func__, fp, ep->xid); + /* + * Generate a TASK_SET_FULL status to notify the initiator + * to reduce its queue_depth after the se_cmd response has + * been re-queued by target-core. + */ + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + return -ENOMEM; + } + fc_exch_done(cmd->seq); + /* + * Drop the extra ACK_KREF reference taken by target_submit_cmd() + * ahead of ft_check_stop_free() -> transport_generic_free_cmd() + * final se_cmd->cmd_kref put. + */ + target_put_sess_cmd(&cmd->se_cmd); + return 0; +} + +/* + * Send TX_RDY (transfer ready). + */ +int ft_write_pending(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct fc_frame *fp; + struct fcp_txrdy *txrdy; + struct fc_lport *lport; + struct fc_exch *ep; + struct fc_frame_header *fh; + u32 f_ctl; + + ft_dump_cmd(cmd, __func__); + + if (cmd->aborted) + return 0; + ep = fc_seq_exch(cmd->seq); + lport = ep->lp; + fp = fc_frame_alloc(lport, sizeof(*txrdy)); + if (!fp) + return -ENOMEM; /* Signal QUEUE_FULL */ + + txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); + memset(txrdy, 0, sizeof(*txrdy)); + txrdy->ft_burst_len = htonl(se_cmd->data_length); + + cmd->seq = fc_seq_start_next(cmd->seq); + fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, + FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + fh = fc_frame_header_get(fp); + f_ctl = ntoh24(fh->fh_f_ctl); + + /* Only if it is 'Exchange Responder' */ + if (f_ctl & FC_FC_EX_CTX) { + /* Target is 'exchange responder' and is sending XFER_READY + * to the 'exchange initiator' + */ + if ((ep->xid <= lport->lro_xid) && + (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { + if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && + lport->tt.ddp_target(lport, ep->xid, + se_cmd->t_data_sg, + se_cmd->t_data_nents)) + cmd->was_ddp_setup = 1; + } + } + fc_seq_send(lport, cmd->seq, fp); + return 0; +} + +/* + * FC sequence response handler for follow-on sequences (data) and aborts.
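+ * Registered with libfc via fc_seq_set_resp(); an IS_ERR frame pointer + * here means the exchange has been aborted.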
+ */ +static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) +{ + struct ft_cmd *cmd = arg; + struct fc_frame_header *fh; + + if (IS_ERR(fp)) { + /* XXX need to find cmd if queued */ + cmd->seq = NULL; + cmd->aborted = true; + return; + } + + fh = fc_frame_header_get(fp); + + switch (fh->fh_r_ctl) { + case FC_RCTL_DD_SOL_DATA: /* write data */ + ft_recv_write_data(cmd, fp); + break; + case FC_RCTL_DD_UNSOL_CTL: /* command */ + case FC_RCTL_DD_SOL_CTL: /* transfer ready */ + case FC_RCTL_DD_DATA_DESC: /* transfer ready */ + default: + pr_debug("%s: unhandled frame r_ctl %x\n", + __func__, fh->fh_r_ctl); + ft_invl_hw_context(cmd); + fc_frame_free(fp); + transport_generic_free_cmd(&cmd->se_cmd, 0); + break; + } +} + +/* + * Send a FCP response including SCSI status and optional FCP rsp_code. + * status is SAM_STAT_GOOD (zero) iff code is valid. + * This is used in error cases, such as allocation failures. + */ +static void ft_send_resp_status(struct fc_lport *lport, + const struct fc_frame *rx_fp, + u32 status, enum fcp_resp_rsp_codes code) +{ + struct fc_frame *fp; + struct fc_seq *sp; + const struct fc_frame_header *fh; + size_t len; + struct fcp_resp_with_ext *fcp; + struct fcp_resp_rsp_info *info; + + fh = fc_frame_header_get(rx_fp); + pr_debug("FCP error response: did %x oxid %x status %x code %x\n", + ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); + len = sizeof(*fcp); + if (status == SAM_STAT_GOOD) + len += sizeof(*info); + fp = fc_frame_alloc(lport, len); + if (!fp) + return; + fcp = fc_frame_payload_get(fp, len); + memset(fcp, 0, len); + fcp->resp.fr_status = status; + if (status == SAM_STAT_GOOD) { + fcp->ext.fr_rsp_len = htonl(sizeof(*info)); + fcp->resp.fr_flags |= FCP_RSP_LEN_VAL; + info = (struct fcp_resp_rsp_info *)(fcp + 1); + info->rsp_code = code; + } + + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); + sp = fr_seq(fp); + if (sp) { + fc_seq_send(lport, sp, fp); + fc_exch_done(sp); + } else { + lport->tt.frame_send(lport, fp); + } +} + +/* + * Send error or task management response. + */ +static void ft_send_resp_code(struct ft_cmd *cmd, + enum fcp_resp_rsp_codes code) +{ + ft_send_resp_status(cmd->sess->tport->lport, + cmd->req_frame, SAM_STAT_GOOD, code); +} + + +/* + * Send error or task management response. + * Always frees the cmd and associated state. + */ +static void ft_send_resp_code_and_free(struct ft_cmd *cmd, + enum fcp_resp_rsp_codes code) +{ + ft_send_resp_code(cmd, code); + ft_free_cmd(cmd); +} + +/* + * Handle Task Management Request. + */ +static void ft_send_tm(struct ft_cmd *cmd) +{ + struct fcp_cmnd *fcp; + int rc; + u8 tm_func; + + fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); + + switch (fcp->fc_tm_flags) { + case FCP_TMF_LUN_RESET: + tm_func = TMR_LUN_RESET; + break; + case FCP_TMF_TGT_RESET: + tm_func = TMR_TARGET_WARM_RESET; + break; + case FCP_TMF_CLR_TASK_SET: + tm_func = TMR_CLEAR_TASK_SET; + break; + case FCP_TMF_ABT_TASK_SET: + tm_func = TMR_ABORT_TASK_SET; + break; + case FCP_TMF_CLR_ACA: + tm_func = TMR_CLEAR_ACA; + break; + default: + /* + * FCP4r01 indicates having a combination of + * tm_flags set is invalid. 
+ */ + pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); + ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); + return; + } + + /* FIXME: Add referenced task tag for ABORT_TASK */ + rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess, + &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), + cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF); + if (rc < 0) + ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); +} + +/* + * Send status from completed task management request. + */ +void ft_queue_tm_resp(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct se_tmr_req *tmr = se_cmd->se_tmr_req; + enum fcp_resp_rsp_codes code; + + if (cmd->aborted) + return; + switch (tmr->response) { + case TMR_FUNCTION_COMPLETE: + code = FCP_TMF_CMPL; + break; + case TMR_LUN_DOES_NOT_EXIST: + code = FCP_TMF_INVALID_LUN; + break; + case TMR_FUNCTION_REJECTED: + code = FCP_TMF_REJECTED; + break; + case TMR_TASK_DOES_NOT_EXIST: + case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: + default: + code = FCP_TMF_FAILED; + break; + } + pr_debug("tmr fn %d resp %d fcp code %d\n", + tmr->function, tmr->response, code); + ft_send_resp_code(cmd, code); + /* + * Drop the extra ACK_KREF reference taken by target_submit_tmr() + * ahead of ft_check_stop_free() -> transport_generic_free_cmd() + * final se_cmd->cmd_kref put. + */ + target_put_sess_cmd(&cmd->se_cmd); +} + +void ft_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + +static void ft_send_work(struct work_struct *work); + +/* + * Handle incoming FCP command. + */ +static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) +{ + struct ft_cmd *cmd; + struct fc_lport *lport = sess->tport->lport; + struct se_session *se_sess = sess->se_sess; + int tag, cpu; + + tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); + if (tag < 0) + goto busy; + + cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct ft_cmd)); + + cmd->se_cmd.map_tag = tag; + cmd->se_cmd.map_cpu = cpu; + cmd->sess = sess; + cmd->seq = fc_seq_assign(lport, fp); + if (!cmd->seq) { + target_free_tag(se_sess, &cmd->se_cmd); + goto busy; + } + cmd->req_frame = fp; /* hold frame during cmd */ + + INIT_WORK(&cmd->work, ft_send_work); + queue_work(sess->tport->tpg->workqueue, &cmd->work); + return; + +busy: + pr_debug("cmd or seq allocation failure - sending BUSY\n"); + ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); + fc_frame_free(fp); + ft_sess_put(sess); /* undo get from lookup */ +} + + +/* + * Handle incoming FCP frame. + * Caller has verified that the frame is type FCP. + */ +void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch (fh->fh_r_ctl) { + case FC_RCTL_DD_UNSOL_CMD: /* command */ + ft_recv_cmd(sess, fp); + break; + case FC_RCTL_DD_SOL_DATA: /* write data */ + case FC_RCTL_DD_UNSOL_CTL: + case FC_RCTL_DD_SOL_CTL: + case FC_RCTL_DD_DATA_DESC: /* transfer ready */ + case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ + default: + pr_debug("%s: unhandled frame r_ctl %x\n", + __func__, fh->fh_r_ctl); + fc_frame_free(fp); + ft_sess_put(sess); /* undo get from lookup */ + break; + } +} + +/* + * Send new command to target. 
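+ * Runs from the per-tpg workqueue: decodes the FCP_CMND payload, maps + * the data direction and task attribute, then hands the command to + * target core.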
+ */ +static void ft_send_work(struct work_struct *work) +{ + struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); + struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); + struct fcp_cmnd *fcp; + int data_dir = 0; + int task_attr; + + fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); + if (!fcp) + goto err; + + if (fcp->fc_flags & FCP_CFL_LEN_MASK) + goto err; /* not handling longer CDBs yet */ + + /* + * Check for FCP task management flags + */ + if (fcp->fc_tm_flags) { + ft_send_tm(cmd); + return; + } + + switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { + case 0: + data_dir = DMA_NONE; + break; + case FCP_CFL_RDDATA: + data_dir = DMA_FROM_DEVICE; + break; + case FCP_CFL_WRDATA: + data_dir = DMA_TO_DEVICE; + break; + case FCP_CFL_WRDATA | FCP_CFL_RDDATA: + goto err; /* TBD not supported by tcm_fc yet */ + } + /* + * Locate the SAM Task Attr from fc_pri_ta + */ + switch (fcp->fc_pri_ta & FCP_PTA_MASK) { + case FCP_PTA_HEADQ: + task_attr = TCM_HEAD_TAG; + break; + case FCP_PTA_ORDERED: + task_attr = TCM_ORDERED_TAG; + break; + case FCP_PTA_ACA: + task_attr = TCM_ACA_TAG; + break; + case FCP_PTA_SIMPLE: + default: + task_attr = TCM_SIMPLE_TAG; + } + + fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd); + cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid; + + /* + * Use a single se_cmd->cmd_kref as we expect to release se_cmd + * directly from ft_check_stop_free callback in response path. + */ + if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess, + &cmd->ft_sense_buffer[0], + scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl), + task_attr, data_dir, TARGET_SCF_ACK_KREF)) + goto err; + + if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0, + NULL, 0, GFP_KERNEL)) + return; + + target_submit(&cmd->se_cmd); + pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd); + return; + +err: + ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); +} diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c new file mode 100644 index 0000000000..6ac3fc1a7d --- /dev/null +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * Filename: tcm_fc.c + * + * This file contains the configfs implementation for TCM_fc fabric node. + * Based on tcm_loop_configfs.c + * + * Copyright (c) 2010 Cisco Systems, Inc. + * Copyright (c) 2009,2010 Rising Tide, Inc. + * Copyright (c) 2009,2010 Linux-iSCSI.org + * + * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org> + * + ****************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <generated/utsrelease.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/kernel.h> +#include <linux/ctype.h> +#include <asm/unaligned.h> +#include <scsi/libfc.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_fc.h" + +static LIST_HEAD(ft_wwn_list); +DEFINE_MUTEX(ft_lport_lock); + +unsigned int ft_debug_logging; +module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +/* + * Parse WWN. 
+ * If strict, we require lower-case hex and colon separators to be sure + * the name is the same as what would be generated by ft_format_wwn() + * so the name and wwn are mapped one-to-one. + */ +static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) +{ + const char *cp; + char c; + u32 byte = 0; + u32 pos = 0; + u32 err; + int val; + + *wwn = 0; + for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { + c = *cp; + if (c == '\n' && cp[1] == '\0') + continue; + if (strict && pos++ == 2 && byte++ < 7) { + pos = 0; + if (c == ':') + continue; + err = 1; + goto fail; + } + if (c == '\0') { + err = 2; + if (strict && byte != 8) + goto fail; + return cp - name; + } + err = 3; + val = hex_to_bin(c); + if (val < 0 || (strict && isupper(c))) + goto fail; + *wwn = (*wwn << 4) | val; + } + err = 4; +fail: + pr_debug("err %u len %zu pos %u byte %u\n", + err, cp - name, pos, byte); + return -1; +} + +ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn) +{ + u8 b[8]; + + put_unaligned_be64(wwn, b); + return snprintf(buf, len, + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); +} + +static ssize_t ft_wwn_show(void *arg, char *buf) +{ + u64 *wwn = arg; + ssize_t len; + + len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn); + buf[len++] = '\n'; + return len; +} + +static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len) +{ + ssize_t ret; + u64 wwn; + + ret = ft_parse_wwn(buf, &wwn, 0); + if (ret > 0) + *(u64 *)arg = wwn; + return ret; +} + +/* + * ACL auth ops. + */ + +static ssize_t ft_nacl_port_name_show(struct config_item *item, char *page) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_show(&acl->node_auth.port_name, page); +} + +static ssize_t ft_nacl_port_name_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_store(&acl->node_auth.port_name, page, count); +} + +static ssize_t ft_nacl_node_name_show(struct config_item *item, + char *page) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_show(&acl->node_auth.node_name, page); +} + +static ssize_t ft_nacl_node_name_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_store(&acl->node_auth.node_name, page, count); +} + +CONFIGFS_ATTR(ft_nacl_, node_name); +CONFIGFS_ATTR(ft_nacl_, port_name); + +static ssize_t ft_nacl_tag_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag); +} + +static ssize_t ft_nacl_tag_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_node_acl *se_nacl = acl_to_nacl(item); + int ret; + + ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page); + + if (ret < 0) + return ret; + return count; +} + +CONFIGFS_ATTR(ft_nacl_, tag); + +static struct configfs_attribute *ft_nacl_base_attrs[] = { + &ft_nacl_attr_port_name, + &ft_nacl_attr_node_name, + &ft_nacl_attr_tag, + NULL, +}; + +/* + * ACL ops. + */ + +/* + * Add ACL for an initiator. The ACL is named arbitrarily. 
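+ * (In practice the name must parse as a strict-format WWPN, which then + * seeds the port_name; see ft_init_nodeacl() below.)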
+ * The port_name and/or node_name can also be updated via attributes. + */ +static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name) +{ + struct ft_node_acl *acl = + container_of(nacl, struct ft_node_acl, se_node_acl); + u64 wwpn; + + if (ft_parse_wwn(name, &wwpn, 1) < 0) + return -EINVAL; + + acl->node_auth.port_name = wwpn; + return 0; +} + +/* + * local_port port_group (tpg) ops. + */ +static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name) +{ + struct ft_lport_wwn *ft_wwn; + struct ft_tpg *tpg; + struct workqueue_struct *wq; + unsigned long index; + int ret; + + pr_debug("tcm_fc: add tpg %s\n", name); + + /* + * Name must be "tpgt_" followed by the index. + */ + if (strstr(name, "tpgt_") != name) + return NULL; + + ret = kstrtoul(name + 5, 10, &index); + if (ret) + return NULL; + if (index > UINT_MAX) + return NULL; + + if (index != 1) { + pr_err("Error, a single TPG=1 is used for HW port mappings\n"); + return ERR_PTR(-ENOSYS); + } + + ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn); + tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); + if (!tpg) + return NULL; + tpg->index = index; + tpg->lport_wwn = ft_wwn; + INIT_LIST_HEAD(&tpg->lun_list); + + wq = alloc_workqueue("tcm_fc", 0, 1); + if (!wq) { + kfree(tpg); + return NULL; + } + + ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); + if (ret < 0) { + destroy_workqueue(wq); + kfree(tpg); + return NULL; + } + tpg->workqueue = wq; + + mutex_lock(&ft_lport_lock); + ft_wwn->tpg = tpg; + mutex_unlock(&ft_lport_lock); + + return &tpg->se_tpg; +} + +static void ft_del_tpg(struct se_portal_group *se_tpg) +{ + struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); + struct ft_lport_wwn *ft_wwn = tpg->lport_wwn; + + pr_debug("del tpg %s\n", + config_item_name(&tpg->se_tpg.tpg_group.cg_item)); + + destroy_workqueue(tpg->workqueue); + + /* Wait for sessions to be freed thru RCU, for BUG_ON below */ + synchronize_rcu(); + + mutex_lock(&ft_lport_lock); + ft_wwn->tpg = NULL; + if (tpg->tport) { + tpg->tport->tpg = NULL; + tpg->tport = NULL; + } + mutex_unlock(&ft_lport_lock); + + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +/* + * Verify that an lport is configured to use the tcm_fc module, and return + * the target port group that should be used. + * + * The caller holds ft_lport_lock. + */ +struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport) +{ + struct ft_lport_wwn *ft_wwn; + + list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) { + if (ft_wwn->wwpn == lport->wwpn) + return ft_wwn->tpg; + } + return NULL; +} + +/* + * target config instance ops. + */ + +/* + * Add lport to allowed config. + * The name is the WWPN in lower-case ASCII, colon-separated bytes.
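+ * This is the strict format produced by ft_format_wwn(), so a configfs + * directory name round-trips through ft_parse_wwn() unchanged.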
+ */ +static struct se_wwn *ft_add_wwn( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct ft_lport_wwn *ft_wwn; + struct ft_lport_wwn *old_ft_wwn; + u64 wwpn; + + pr_debug("add wwn %s\n", name); + if (ft_parse_wwn(name, &wwpn, 1) < 0) + return NULL; + ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL); + if (!ft_wwn) + return NULL; + ft_wwn->wwpn = wwpn; + + mutex_lock(&ft_lport_lock); + list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) { + if (old_ft_wwn->wwpn == wwpn) { + mutex_unlock(&ft_lport_lock); + kfree(ft_wwn); + return NULL; + } + } + list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list); + ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn); + mutex_unlock(&ft_lport_lock); + + return &ft_wwn->se_wwn; +} + +static void ft_del_wwn(struct se_wwn *wwn) +{ + struct ft_lport_wwn *ft_wwn = container_of(wwn, + struct ft_lport_wwn, se_wwn); + + pr_debug("del wwn %s\n", ft_wwn->name); + mutex_lock(&ft_lport_lock); + list_del(&ft_wwn->ft_wwn_node); + mutex_unlock(&ft_lport_lock); + + kfree(ft_wwn); +} + +static ssize_t ft_wwn_version_show(struct config_item *item, char *page) +{ + return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on " + ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); +} + +CONFIGFS_ATTR_RO(ft_wwn_, version); + +static struct configfs_attribute *ft_wwn_attrs[] = { + &ft_wwn_attr_version, + NULL, +}; + +static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg) +{ + return container_of(se_tpg, struct ft_tpg, se_tpg); +} + +static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + return ft_tpg(se_tpg)->lport_wwn->name; +} + +static u16 ft_get_tag(struct se_portal_group *se_tpg) +{ + /* + * This tag is used when forming SCSI Name identifier in EVPD=1 0x83 + * to represent the SCSI Target Port. 
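+ * (That is, the INQUIRY Device Identification VPD page, 0x83.)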
+ */ + return ft_tpg(se_tpg)->index; +} + +static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return ft_tpg(se_tpg)->index; +} + +static const struct target_core_fabric_ops ft_fabric_ops = { + .module = THIS_MODULE, + .fabric_name = "fc", + .node_acl_size = sizeof(struct ft_node_acl), + .tpg_get_wwn = ft_get_fabric_wwn, + .tpg_get_tag = ft_get_tag, + .tpg_get_inst_index = ft_tpg_get_inst_index, + .check_stop_free = ft_check_stop_free, + .release_cmd = ft_release_cmd, + .close_session = ft_sess_close, + .sess_get_index = ft_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = ft_write_pending, + .queue_data_in = ft_queue_data_in, + .queue_status = ft_queue_status, + .queue_tm_rsp = ft_queue_tm_resp, + .aborted_task = ft_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = &ft_add_wwn, + .fabric_drop_wwn = &ft_del_wwn, + .fabric_make_tpg = &ft_add_tpg, + .fabric_drop_tpg = &ft_del_tpg, + .fabric_init_nodeacl = &ft_init_nodeacl, + + .tfc_wwn_attrs = ft_wwn_attrs, + .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, +}; + +static struct notifier_block ft_notifier = { + .notifier_call = ft_lport_notify +}; + +static int __init ft_init(void) +{ + int ret; + + ret = target_register_template(&ft_fabric_ops); + if (ret) + goto out; + + ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov); + if (ret) + goto out_unregister_template; + + blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); + fc_lport_iterate(ft_lport_add, NULL); + return 0; + +out_unregister_template: + target_unregister_template(&ft_fabric_ops); +out: + return ret; +} + +static void __exit ft_exit(void) +{ + blocking_notifier_chain_unregister(&fc_lport_notifier_head, + &ft_notifier); + fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); + fc_lport_iterate(ft_lport_del, NULL); + target_unregister_template(&ft_fabric_ops); + synchronize_rcu(); +} + +MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); +MODULE_LICENSE("GPL"); +module_init(ft_init); +module_exit(ft_exit); diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c new file mode 100644 index 0000000000..bbe2e29612 --- /dev/null +++ b/drivers/target/tcm_fc/tfc_io.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2010 Cisco Systems, Inc. + * + * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c + * + * Copyright (c) 2007 Intel Corporation. All rights reserved. + * Copyright (c) 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2008 Mike Christie + * Copyright (c) 2009 Rising Tide, Inc. + * Copyright (c) 2009 Linux-iSCSI.org + * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org> + */ + +/* XXX TBD some includes may be extraneous */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/hash.h> +#include <linux/ratelimit.h> +#include <asm/unaligned.h> +#include <scsi/libfc.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_fc.h" + +/* + * Deliver read data back to initiator. + * XXX TBD handle resource problems later. 
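+ * + * The payload is walked one scatterlist entry at a time and sent as a + * sequence of solicited data frames. When the total length is a multiple + * of four, pages are attached to the skb directly; otherwise each chunk + * is copied into the frame via kmap_atomic().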
+ */ +int ft_queue_data_in(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct fc_frame *fp = NULL; + struct fc_exch *ep; + struct fc_lport *lport; + struct scatterlist *sg = NULL; + size_t remaining; + u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; + u32 mem_off = 0; + u32 fh_off = 0; + u32 frame_off = 0; + size_t frame_len = 0; + size_t mem_len = 0; + size_t tlen; + size_t off_in_page; + struct page *page = NULL; + int use_sg; + int error; + void *page_addr; + void *from; + void *to = NULL; + + if (cmd->aborted) + return 0; + + if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL) + goto queue_status; + + ep = fc_seq_exch(cmd->seq); + lport = ep->lp; + cmd->seq = fc_seq_start_next(cmd->seq); + + remaining = se_cmd->data_length; + + /* + * Setup to use first mem list entry, unless no data. + */ + BUG_ON(remaining && !se_cmd->t_data_sg); + if (remaining) { + sg = se_cmd->t_data_sg; + mem_len = sg->length; + mem_off = sg->offset; + page = sg_page(sg); + } + + /* no scatter/gather in skb for odd word length due to fc_seq_send() */ + use_sg = !(remaining % 4); + + while (remaining) { + struct fc_seq *seq = cmd->seq; + + if (!seq) { + pr_debug("%s: Command aborted, xid 0x%x\n", + __func__, ep->xid); + break; + } + if (!mem_len) { + sg = sg_next(sg); + mem_len = min((size_t)sg->length, remaining); + mem_off = sg->offset; + page = sg_page(sg); + } + if (!frame_len) { + /* + * If the lport has the Large Send Offload (LSO) + * capability, allow 'frame_len' to be as large as + * 'lso_max'; otherwise limit it to the session's + * negotiated max frame size. + */ + frame_len = (lport->seq_offload) ? lport->lso_max : + cmd->sess->max_frame; + frame_len = min(frame_len, remaining); + fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len); + if (!fp) + return -ENOMEM; + to = fc_frame_payload_get(fp, 0); + fh_off = frame_off; + frame_off += frame_len; + /* + * Setup the frame's max payload, which is used by the + * base driver to tell the HW the max frame size, so + * that the HW can fragment appropriately based on the + * "gso_max_size" of the underlying netdev. + */ + fr_max_payload(fp) = cmd->sess->max_frame; + } + tlen = min(mem_len, frame_len); + + if (use_sg) { + off_in_page = mem_off; + BUG_ON(!page); + get_page(page); + skb_fill_page_desc(fp_skb(fp), + skb_shinfo(fp_skb(fp))->nr_frags, + page, off_in_page, tlen); + fr_len(fp) += tlen; + fp_skb(fp)->data_len += tlen; + fp_skb(fp)->truesize += page_size(page); + } else { + BUG_ON(!page); + from = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); + page_addr = from; + from += offset_in_page(mem_off); + tlen = min(tlen, (size_t)(PAGE_SIZE - + offset_in_page(mem_off))); + memcpy(to, from, tlen); + kunmap_atomic(page_addr); + to += tlen; + } + + mem_off += tlen; + mem_len -= tlen; + frame_len -= tlen; + remaining -= tlen; + + if (frame_len && + (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN)) + continue; + if (!remaining) + f_ctl |= FC_FC_END_SEQ; + fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, + FC_TYPE_FCP, f_ctl, fh_off); + error = fc_seq_send(lport, seq, fp); + if (error) { + pr_info_ratelimited("%s: Failed to send frame %p, " + "xid <0x%x>, remaining %zu, " + "lso_max <0x%x>\n", + __func__, fp, ep->xid, + remaining, lport->lso_max); + /* + * Go ahead and set TASK_SET_FULL status, ignoring the + * rest of the DataIN, and immediately attempt to + * send the response via ft_queue_status() in order + * to notify the initiator that it should reduce its + * per-LUN queue_depth.
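+ * (The break below falls through to the queue_status label, + * which sends the response immediately.)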
+ */ + se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + break; + } + } +queue_status: + return ft_queue_status(se_cmd); +} + +static void ft_execute_work(struct work_struct *work) +{ + struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); + + target_execute_cmd(&cmd->se_cmd); +} + +/* + * Receive write data frame. + */ +void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct fc_seq *seq = cmd->seq; + struct fc_exch *ep; + struct fc_lport *lport; + struct fc_frame_header *fh; + struct scatterlist *sg = NULL; + u32 mem_off = 0; + u32 rel_off; + size_t frame_len; + size_t mem_len = 0; + size_t tlen; + struct page *page = NULL; + void *page_addr; + void *from; + void *to; + u32 f_ctl; + void *buf; + + fh = fc_frame_header_get(fp); + if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) + goto drop; + + f_ctl = ntoh24(fh->fh_f_ctl); + ep = fc_seq_exch(seq); + lport = ep->lp; + if (cmd->was_ddp_setup) { + BUG_ON(!lport); + /* + * Since DDP (Large Rx offload) was setup for this request, + * payload is expected to be copied directly to user buffers. + */ + buf = fc_frame_payload_get(fp, 1); + if (buf) + pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " + "cmd->sg_cnt 0x%x. DDP was setup, " + "hence not expected to receive a frame " + "with payload. The frame will be dropped " + "if the 'Sequence Initiative' bit in f_ctl " + "is not set\n", __func__, ep->xid, f_ctl, + se_cmd->t_data_sg, se_cmd->t_data_nents); + /* + * Invalidate the HW DDP context if it was setup for this + * command. Invalidation of the HW DDP context is required + * in both situations (success and error). + */ + ft_invl_hw_context(cmd); + + /* + * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the + * last write data frame was received successfully: the + * payload was posted directly to the user buffer and only the + * last frame's header was posted to the receive queue. + * + * If the "Sequence Initiative (TSI)" bit is not set, an error + * occurred w.r.t. DDP, so drop the packet and let explicit + * ABORTs from the other end of the exchange, or the exchange + * timer, trigger the recovery. + */ + if (f_ctl & FC_FC_SEQ_INIT) + goto last_frame; + else + goto drop; + } + + rel_off = ntohl(fh->fh_parm_offset); + frame_len = fr_len(fp); + if (frame_len <= sizeof(*fh)) + goto drop; + frame_len -= sizeof(*fh); + from = fc_frame_payload_get(fp, 0); + if (rel_off >= se_cmd->data_length) + goto drop; + if (frame_len + rel_off > se_cmd->data_length) + frame_len = se_cmd->data_length - rel_off; + + /* + * Setup to use first mem list entry, unless no data.
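+ * The frame's relative offset (fh_parm_offset) selects where in the + * command buffer this payload lands; the loop below skips whole sg + * entries until rel_off has been consumed.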
+ */ + BUG_ON(frame_len && !se_cmd->t_data_sg); + if (frame_len) { + sg = se_cmd->t_data_sg; + mem_len = sg->length; + mem_off = sg->offset; + page = sg_page(sg); + } + + while (frame_len) { + if (!mem_len) { + sg = sg_next(sg); + mem_len = sg->length; + mem_off = sg->offset; + page = sg_page(sg); + } + if (rel_off >= mem_len) { + rel_off -= mem_len; + mem_len = 0; + continue; + } + mem_off += rel_off; + mem_len -= rel_off; + rel_off = 0; + + tlen = min(mem_len, frame_len); + + to = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); + page_addr = to; + to += offset_in_page(mem_off); + tlen = min(tlen, (size_t)(PAGE_SIZE - + offset_in_page(mem_off))); + memcpy(to, from, tlen); + kunmap_atomic(page_addr); + + from += tlen; + frame_len -= tlen; + mem_off += tlen; + mem_len -= tlen; + cmd->write_data_len += tlen; + } +last_frame: + if (cmd->write_data_len == se_cmd->data_length) { + INIT_WORK(&cmd->work, ft_execute_work); + queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work); + } +drop: + fc_frame_free(fp); +} + +/* + * Handle and clean up any HW-specific resources on received + * ABORTs, errors, or timeouts. + */ +void ft_invl_hw_context(struct ft_cmd *cmd) +{ + struct fc_seq *seq; + struct fc_exch *ep = NULL; + struct fc_lport *lport = NULL; + + BUG_ON(!cmd); + seq = cmd->seq; + + /* Cleanup the DDP context in HW if DDP was setup */ + if (cmd->was_ddp_setup && seq) { + ep = fc_seq_exch(seq); + if (ep) { + lport = ep->lp; + if (lport && (ep->xid <= lport->lro_xid)) { + /* + * "ddp_done" triggers invalidation of the + * HW-specific DDP context + */ + cmd->write_data_len = lport->tt.ddp_done(lport, + ep->xid); + + /* + * Reset the flag to indicate the HW DDP + * context has been invalidated, to avoid + * re-invalidating the same context (contexts + * are identified using ep->xid) + */ + cmd->was_ddp_setup = 0; + } + } + } +} diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c new file mode 100644 index 0000000000..593540da93 --- /dev/null +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2010 Cisco Systems, Inc. + */ + +/* XXX TBD some includes may be extraneous */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/hash.h> +#include <linux/rcupdate.h> +#include <linux/rculist.h> +#include <linux/kref.h> +#include <asm/unaligned.h> +#include <scsi/libfc.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_fc.h" + +#define TFC_SESS_DBG(lport, fmt, args...) \ + pr_debug("host%u: rport %6.6x: " fmt, \ + (lport)->host->host_no, \ + (lport)->port_id, ##args) + +static void ft_sess_delete_all(struct ft_tport *); + +/* + * Lookup or allocate target local port. + * Caller holds ft_lport_lock.
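+ * The tport is published to RCU readers via lport->prov[FC_TYPE_FCP].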
+ */ +static struct ft_tport *ft_tport_get(struct fc_lport *lport) +{ + struct ft_tpg *tpg; + struct ft_tport *tport; + int i; + + tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP], + lockdep_is_held(&ft_lport_lock)); + if (tport && tport->tpg) + return tport; + + tpg = ft_lport_find_tpg(lport); + if (!tpg) + return NULL; + + if (tport) { + tport->tpg = tpg; + tpg->tport = tport; + return tport; + } + + tport = kzalloc(sizeof(*tport), GFP_KERNEL); + if (!tport) + return NULL; + + tport->lport = lport; + tport->tpg = tpg; + tpg->tport = tport; + for (i = 0; i < FT_SESS_HASH_SIZE; i++) + INIT_HLIST_HEAD(&tport->hash[i]); + + rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport); + return tport; +} + +/* + * Delete a target local port. + * Caller holds ft_lport_lock. + */ +static void ft_tport_delete(struct ft_tport *tport) +{ + struct fc_lport *lport; + struct ft_tpg *tpg; + + ft_sess_delete_all(tport); + lport = tport->lport; + lport->service_params &= ~FCP_SPPF_TARG_FCN; + BUG_ON(tport != lport->prov[FC_TYPE_FCP]); + RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL); + + tpg = tport->tpg; + if (tpg) { + tpg->tport = NULL; + tport->tpg = NULL; + } + kfree_rcu(tport, rcu); +} + +/* + * Add local port. + * Called thru fc_lport_iterate(). + */ +void ft_lport_add(struct fc_lport *lport, void *arg) +{ + mutex_lock(&ft_lport_lock); + ft_tport_get(lport); + lport->service_params |= FCP_SPPF_TARG_FCN; + mutex_unlock(&ft_lport_lock); +} + +/* + * Delete local port. + * Called thru fc_lport_iterate(). + */ +void ft_lport_del(struct fc_lport *lport, void *arg) +{ + struct ft_tport *tport; + + mutex_lock(&ft_lport_lock); + tport = lport->prov[FC_TYPE_FCP]; + if (tport) + ft_tport_delete(tport); + mutex_unlock(&ft_lport_lock); +} + +/* + * Notification of local port change from libfc. + * Create or delete local port and associated tport. + */ +int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg) +{ + struct fc_lport *lport = arg; + + switch (event) { + case FC_LPORT_EV_ADD: + ft_lport_add(lport, NULL); + break; + case FC_LPORT_EV_DEL: + ft_lport_del(lport, NULL); + break; + } + return NOTIFY_DONE; +} + +/* + * Hash function for FC_IDs. + */ +static u32 ft_sess_hash(u32 port_id) +{ + return hash_32(port_id, FT_SESS_HASH_BITS); +} + +/* + * Find session in local port. + * Sessions and hash lists are RCU-protected. + * A reference is taken which must be eventually freed. 
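+ * (The matching release is ft_sess_put().)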
+ */ +static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) +{ + struct ft_tport *tport; + struct hlist_head *head; + struct ft_sess *sess; + char *reason = "no session created"; + + rcu_read_lock(); + tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); + if (!tport) { + reason = "not an FCP port"; + goto out; + } + + head = &tport->hash[ft_sess_hash(port_id)]; + hlist_for_each_entry_rcu(sess, head, hash) { + if (sess->port_id == port_id) { + kref_get(&sess->kref); + rcu_read_unlock(); + TFC_SESS_DBG(lport, "port_id %x found %p\n", + port_id, sess); + return sess; + } + } +out: + rcu_read_unlock(); + TFC_SESS_DBG(lport, "port_id %x not found, %s\n", + port_id, reason); + return NULL; +} + +static int ft_sess_alloc_cb(struct se_portal_group *se_tpg, + struct se_session *se_sess, void *p) +{ + struct ft_sess *sess = p; + struct ft_tport *tport = sess->tport; + struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)]; + + TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess); + hlist_add_head_rcu(&sess->hash, head); + tport->sess_count++; + + return 0; +} + +/* + * Allocate session and enter it in the hash for the local port. + * Caller holds ft_lport_lock. + */ +static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, + struct fc_rport_priv *rdata) +{ + struct se_portal_group *se_tpg = &tport->tpg->se_tpg; + struct ft_sess *sess; + struct hlist_head *head; + unsigned char initiatorname[TRANSPORT_IQN_LEN]; + + ft_format_wwn(&initiatorname[0], TRANSPORT_IQN_LEN, rdata->ids.port_name); + + head = &tport->hash[ft_sess_hash(port_id)]; + hlist_for_each_entry_rcu(sess, head, hash) + if (sess->port_id == port_id) + return sess; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) + return ERR_PTR(-ENOMEM); + + kref_init(&sess->kref); /* ref for table entry */ + sess->tport = tport; + sess->port_id = port_id; + + sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS, + sizeof(struct ft_cmd), + TARGET_PROT_NORMAL, &initiatorname[0], + sess, ft_sess_alloc_cb); + if (IS_ERR(sess->se_sess)) { + int rc = PTR_ERR(sess->se_sess); + kfree(sess); + sess = ERR_PTR(rc); + } + return sess; +} + +/* + * Unhash the session. + * Caller holds ft_lport_lock. + */ +static void ft_sess_unhash(struct ft_sess *sess) +{ + struct ft_tport *tport = sess->tport; + + hlist_del_rcu(&sess->hash); + BUG_ON(!tport->sess_count); + tport->sess_count--; + sess->port_id = -1; + sess->params = 0; +} + +/* + * Delete session from hash. + * Caller holds ft_lport_lock. + */ +static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) +{ + struct hlist_head *head; + struct ft_sess *sess; + + head = &tport->hash[ft_sess_hash(port_id)]; + hlist_for_each_entry_rcu(sess, head, hash) { + if (sess->port_id == port_id) { + ft_sess_unhash(sess); + return sess; + } + } + return NULL; +} + +static void ft_close_sess(struct ft_sess *sess) +{ + target_stop_session(sess->se_sess); + target_wait_for_sess_cmds(sess->se_sess); + ft_sess_put(sess); +} + +/* + * Delete all sessions from tport. + * Caller holds ft_lport_lock. + */ +static void ft_sess_delete_all(struct ft_tport *tport) +{ + struct hlist_head *head; + struct ft_sess *sess; + + for (head = tport->hash; + head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { + hlist_for_each_entry_rcu(sess, head, hash) { + ft_sess_unhash(sess); + ft_close_sess(sess); /* release from table */ + } + } +} + +/* + * TCM ops for sessions. + */ + +/* + * Remove session and send PRLO. 
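+ * (Sending the LOGO or PRLO itself is still TBD; see the XXX below.)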
+ * This is called when the ACL is being deleted or queue depth is changing. + */ +void ft_sess_close(struct se_session *se_sess) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + u32 port_id; + + mutex_lock(&ft_lport_lock); + port_id = sess->port_id; + if (port_id == -1) { + mutex_unlock(&ft_lport_lock); + return; + } + TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id); + ft_sess_unhash(sess); + mutex_unlock(&ft_lport_lock); + ft_close_sess(sess); + /* XXX Send LOGO or PRLO */ + synchronize_rcu(); /* let transport deregister happen */ +} + +u32 ft_sess_get_index(struct se_session *se_sess) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + return sess->port_id; /* XXX TBD probably not what is needed */ +} + +u32 ft_sess_get_port_name(struct se_session *se_sess, + unsigned char *buf, u32 len) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + return ft_format_wwn(buf, len, sess->port_name); +} + +/* + * libfc ops involving sessions. + */ + +static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, struct fc_els_spp *spp) +{ + struct ft_tport *tport; + struct ft_sess *sess; + u32 fcp_parm; + + tport = ft_tport_get(rdata->local_port); + if (!tport) + goto not_target; /* not a target for this local port */ + + if (!rspp) + goto fill; + + if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL)) + return FC_SPP_RESP_NO_PA; + + /* + * If both target and initiator bits are off, the SPP is invalid. + */ + fcp_parm = ntohl(rspp->spp_params); + if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN))) + return FC_SPP_RESP_INVL; + + /* + * Create session (image pair) only if requested by + * EST_IMG_PAIR flag and if the requestor is an initiator. + */ + if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) { + spp->spp_flags |= FC_SPP_EST_IMG_PAIR; + if (!(fcp_parm & FCP_SPPF_INIT_FCN)) + return FC_SPP_RESP_CONF; + sess = ft_sess_create(tport, rdata->ids.port_id, rdata); + if (IS_ERR(sess)) { + if (PTR_ERR(sess) == -EACCES) { + spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR; + return FC_SPP_RESP_CONF; + } else + return FC_SPP_RESP_RES; + } + if (!sess->params) + rdata->prli_count++; + sess->params = fcp_parm; + sess->port_name = rdata->ids.port_name; + sess->max_frame = rdata->maxframe_size; + + /* XXX TBD - clearing actions. unit attn, see 4.10 */ + } + + /* + * OR in our service parameters with other provider (initiator), if any. + */ +fill: + fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_RETRY; + spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN); + return FC_SPP_RESP_ACK; + +not_target: + fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_TARG_FCN; + spp->spp_params = htonl(fcp_parm); + return 0; +} + +/** + * ft_prli() - Handle incoming or outgoing PRLI for the FCP target + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page (NULL for outgoing PRLI) + * @spp: response service parameter page + * + * Returns spp response code. + */ +static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, struct fc_els_spp *spp) +{ + int ret; + + mutex_lock(&ft_lport_lock); + ret = ft_prli_locked(rdata, spp_len, rspp, spp); + mutex_unlock(&ft_lport_lock); + TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n", + rdata->ids.port_id, rspp ? 
rspp->spp_flags : 0, ret); + return ret; +} + +static void ft_sess_free(struct kref *kref) +{ + struct ft_sess *sess = container_of(kref, struct ft_sess, kref); + + target_remove_session(sess->se_sess); + kfree_rcu(sess, rcu); +} + +void ft_sess_put(struct ft_sess *sess) +{ + int sess_held = kref_read(&sess->kref); + + BUG_ON(!sess_held); + kref_put(&sess->kref, ft_sess_free); +} + +static void ft_prlo(struct fc_rport_priv *rdata) +{ + struct ft_sess *sess; + struct ft_tport *tport; + + mutex_lock(&ft_lport_lock); + tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP], + lockdep_is_held(&ft_lport_lock)); + + if (!tport) { + mutex_unlock(&ft_lport_lock); + return; + } + sess = ft_sess_delete(tport, rdata->ids.port_id); + if (!sess) { + mutex_unlock(&ft_lport_lock); + return; + } + mutex_unlock(&ft_lport_lock); + ft_close_sess(sess); /* release from table */ + rdata->prli_count--; + /* XXX TBD - clearing actions. unit attn, see 4.10 */ +} + +/* + * Handle incoming FCP request. + * Caller has verified that the frame is type FCP. + */ +static void ft_recv(struct fc_lport *lport, struct fc_frame *fp) +{ + struct ft_sess *sess; + u32 sid = fc_frame_sid(fp); + + TFC_SESS_DBG(lport, "recv sid %x\n", sid); + + sess = ft_sess_get(lport, sid); + if (!sess) { + TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid); + /* TBD XXX - if FCP_CMND, send PRLO */ + fc_frame_free(fp); + return; + } + ft_recv_req(sess, fp); /* must do ft_sess_put() */ +} + +/* + * Provider ops for libfc. + */ +struct fc4_prov ft_prov = { + .prli = ft_prli, + .prlo = ft_prlo, + .recv = ft_recv, + .module = THIS_MODULE, +}; diff --git a/drivers/target/tcm_remote/Kconfig b/drivers/target/tcm_remote/Kconfig new file mode 100644 index 0000000000..e6bebb5fe6 --- /dev/null +++ b/drivers/target/tcm_remote/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +config REMOTE_TARGET + tristate "TCM Virtual Remote target" + depends on SCSI + help + Say Y here to enable the TCM Virtual Remote fabric. + It is a dummy fabric that tells TCM about the configuration + of TPG/ACL/LUN on peer nodes in a cluster.
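The tcm_remote driver added below is close to the smallest possible TCM fabric: a target_core_fabric_ops template plus stub callbacks. A minimal, illustrative sketch of that registration pattern follows; the demo_* names are invented here, and the ops elided in the comment must still be supplied (as tcm_remote does) before target_register_template() will accept the template.

```c
#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static char demo_wwn[] = "naa.6001405000000000";	/* placeholder endpoint WWN */

static char *demo_get_wwn(struct se_portal_group *se_tpg)
{
	return demo_wwn;
}

static u16 demo_get_tag(struct se_portal_group *se_tpg)
{
	return 1;	/* single TPG, as in tcm_fc's ft_add_tpg() */
}

static const struct target_core_fabric_ops demo_ops = {
	.module      = THIS_MODULE,
	.fabric_name = "demo",
	.tpg_get_wwn = demo_get_wwn,
	.tpg_get_tag = demo_get_tag,
	/* plus release_cmd, queue_*, fabric_make_wwn/tpg, ... as above */
};

static int __init demo_init(void)
{
	/* the fabric then appears under /sys/kernel/config/target/demo/ */
	return target_register_template(&demo_ops);
}

static void __exit demo_exit(void)
{
	target_unregister_template(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Skeleton TCM fabric (illustrative only)");
MODULE_LICENSE("GPL");
```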
diff --git a/drivers/target/tcm_remote/Makefile b/drivers/target/tcm_remote/Makefile new file mode 100644 index 0000000000..5818ffd0b0 --- /dev/null +++ b/drivers/target/tcm_remote/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_REMOTE_TARGET) += tcm_remote.o diff --git a/drivers/target/tcm_remote/tcm_remote.c b/drivers/target/tcm_remote/tcm_remote.c new file mode 100644 index 0000000000..cb8db25580 --- /dev/null +++ b/drivers/target/tcm_remote/tcm_remote.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/configfs.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_remote.h" + +static inline struct tcm_remote_tpg *remote_tpg(struct se_portal_group *se_tpg) +{ + return container_of(se_tpg, struct tcm_remote_tpg, remote_se_tpg); +} + +static char *tcm_remote_get_endpoint_wwn(struct se_portal_group *se_tpg) +{ + /* + * Return the passed NAA identifier for the Target Port + */ + return &remote_tpg(se_tpg)->remote_hba->remote_wwn_address[0]; +} + +static u16 tcm_remote_get_tag(struct se_portal_group *se_tpg) +{ + /* + * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 + * to represent the SCSI Target Port. + */ + return remote_tpg(se_tpg)->remote_tpgt; +} + +static int tcm_remote_dummy_cmd_fn(struct se_cmd *se_cmd) +{ + return 0; +} + +static void tcm_remote_dummy_cmd_void_fn(struct se_cmd *se_cmd) +{ + +} + +static char *tcm_remote_dump_proto_id(struct tcm_remote_hba *remote_hba) +{ + switch (remote_hba->remote_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_SRP: + return "SRP"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static int tcm_remote_port_link( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + pr_debug("TCM_Remote_ConfigFS: Port Link LUN %lld Successful\n", + lun->unpacked_lun); + return 0; +} + +static void tcm_remote_port_unlink( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + pr_debug("TCM_Remote_ConfigFS: Port Unlink LUN %lld Successful\n", + lun->unpacked_lun); +} + +static struct se_portal_group *tcm_remote_make_tpg( + struct se_wwn *wwn, + const char *name) +{ + struct tcm_remote_hba *remote_hba = container_of(wwn, + struct tcm_remote_hba, remote_hba_wwn); + struct tcm_remote_tpg *remote_tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) { + pr_err("Unable to locate \"tpgt_#\" directory group\n"); + return ERR_PTR(-EINVAL); + } + if (kstrtoul(name + 5, 10, &tpgt)) + return ERR_PTR(-EINVAL); + + if (tpgt >= TL_TPGS_PER_HBA) { + pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n", + tpgt, TL_TPGS_PER_HBA); + return ERR_PTR(-EINVAL); + } + remote_tpg = &remote_hba->remote_hba_tpgs[tpgt]; + remote_tpg->remote_hba = remote_hba; + remote_tpg->remote_tpgt = tpgt; + /* + * Register the remote_tpg as an emulated TCM Target Endpoint + */ + ret = core_tpg_register(wwn, &remote_tpg->remote_se_tpg, + remote_hba->remote_proto_id); + if (ret < 0) + return ERR_PTR(ret); + + pr_debug("TCM_Remote_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n", + tcm_remote_dump_proto_id(remote_hba), +
config_item_name(&wwn->wwn_group.cg_item), tpgt); + return &remote_tpg->remote_se_tpg; +} + +static void tcm_remote_drop_tpg(struct se_portal_group *se_tpg) +{ + struct se_wwn *wwn = se_tpg->se_tpg_wwn; + struct tcm_remote_tpg *remote_tpg = container_of(se_tpg, + struct tcm_remote_tpg, remote_se_tpg); + struct tcm_remote_hba *remote_hba; + unsigned short tpgt; + + remote_hba = remote_tpg->remote_hba; + tpgt = remote_tpg->remote_tpgt; + + /* + * Deregister the remote_tpg as an emulated TCM Target Endpoint + */ + core_tpg_deregister(se_tpg); + + remote_tpg->remote_hba = NULL; + remote_tpg->remote_tpgt = 0; + + pr_debug("TCM_Remote_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n", + tcm_remote_dump_proto_id(remote_hba), + config_item_name(&wwn->wwn_group.cg_item), tpgt); +} + +static struct se_wwn *tcm_remote_make_wwn( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_remote_hba *remote_hba; + char *ptr; + int ret, off = 0; + + remote_hba = kzalloc(sizeof(*remote_hba), GFP_KERNEL); + if (!remote_hba) + return ERR_PTR(-ENOMEM); + + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + remote_hba->remote_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + remote_hba->remote_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." */ + goto check_len; + } + ptr = strstr(name, "0x"); + if (ptr) { + remote_hba->remote_proto_id = SCSI_PROTOCOL_SRP; + off = 2; /* Skip over "0x" */ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (!ptr) { + pr_err("Unable to locate prefix for emulated Target Port: %s\n", + name); + ret = -EINVAL; + goto out; + } + remote_hba->remote_proto_id = SCSI_PROTOCOL_ISCSI; + +check_len: + if (strlen(name) >= TL_WWN_ADDR_LEN) { + pr_err("Emulated %s Address: %s, exceeds max: %d\n", + tcm_remote_dump_proto_id(remote_hba), name, TL_WWN_ADDR_LEN); + ret = -EINVAL; + goto out; + } + snprintf(&remote_hba->remote_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); + + pr_debug("TCM_Remote_ConfigFS: Allocated emulated Target %s Address: %s\n", + tcm_remote_dump_proto_id(remote_hba), name); + return &remote_hba->remote_hba_wwn; +out: + kfree(remote_hba); + return ERR_PTR(ret); +} + +static void tcm_remote_drop_wwn(struct se_wwn *wwn) +{ + struct tcm_remote_hba *remote_hba = container_of(wwn, + struct tcm_remote_hba, remote_hba_wwn); + + pr_debug("TCM_Remote_ConfigFS: Deallocating emulated Target %s Address: %s\n", + tcm_remote_dump_proto_id(remote_hba), + remote_hba->remote_wwn_address); + kfree(remote_hba); +} + +static ssize_t tcm_remote_wwn_version_show(struct config_item *item, char *page) +{ + return sprintf(page, "TCM Remote Fabric module %s\n", TCM_REMOTE_VERSION); +} + +CONFIGFS_ATTR_RO(tcm_remote_wwn_, version); + +static struct configfs_attribute *tcm_remote_wwn_attrs[] = { + &tcm_remote_wwn_attr_version, + NULL, +}; + +static const struct target_core_fabric_ops remote_ops = { + .module = THIS_MODULE, + .fabric_name = "remote", + .tpg_get_wwn = tcm_remote_get_endpoint_wwn, + .tpg_get_tag = tcm_remote_get_tag, + .check_stop_free = tcm_remote_dummy_cmd_fn, + .release_cmd = tcm_remote_dummy_cmd_void_fn, + .write_pending = tcm_remote_dummy_cmd_fn, + .queue_data_in = tcm_remote_dummy_cmd_fn, + .queue_status = tcm_remote_dummy_cmd_fn, + .queue_tm_rsp = tcm_remote_dummy_cmd_void_fn, + .aborted_task = tcm_remote_dummy_cmd_void_fn, +
.fabric_make_wwn = tcm_remote_make_wwn, + .fabric_drop_wwn = tcm_remote_drop_wwn, + .fabric_make_tpg = tcm_remote_make_tpg, + .fabric_drop_tpg = tcm_remote_drop_tpg, + .fabric_post_link = tcm_remote_port_link, + .fabric_pre_unlink = tcm_remote_port_unlink, + .tfc_wwn_attrs = tcm_remote_wwn_attrs, +}; + +static int __init tcm_remote_fabric_init(void) +{ + return target_register_template(&remote_ops); +} + +static void __exit tcm_remote_fabric_exit(void) +{ + target_unregister_template(&remote_ops); +} + +MODULE_DESCRIPTION("TCM virtual remote target"); +MODULE_AUTHOR("Dmitry Bogdanov <d.bogdanov@yadro.com>"); +MODULE_LICENSE("GPL"); +module_init(tcm_remote_fabric_init); +module_exit(tcm_remote_fabric_exit); diff --git a/drivers/target/tcm_remote/tcm_remote.h b/drivers/target/tcm_remote/tcm_remote.h new file mode 100644 index 0000000000..913d1a6eb3 --- /dev/null +++ b/drivers/target/tcm_remote/tcm_remote.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/types.h> +#include <linux/device.h> + +#define TCM_REMOTE_VERSION "v0.1" +#define TL_WWN_ADDR_LEN 256 +#define TL_TPGS_PER_HBA 32 + +struct tcm_remote_tpg { + unsigned short remote_tpgt; + struct se_portal_group remote_se_tpg; + struct tcm_remote_hba *remote_hba; +}; + +struct tcm_remote_hba { + u8 remote_proto_id; + unsigned char remote_wwn_address[TL_WWN_ADDR_LEN]; + struct tcm_remote_tpg remote_hba_tpgs[TL_TPGS_PER_HBA]; + struct se_wwn remote_hba_wwn; +};
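The prefix dispatch in tcm_remote_make_wwn() above is easy to exercise in isolation. The stand-alone sketch below mirrors its strstr()-based matching and prefix stripping; the protocol labels stand in for the kernel's SCSI_PROTOCOL_* values, and the test names are invented.

```c
#include <stdio.h>
#include <string.h>

/* Mirrors tcm_remote_make_wwn(): the first matching pattern wins
 * (strstr() matches anywhere in the name, as in the driver), and
 * the "fc."/"0x" prefixes are stripped before the name is stored. */
static const char *classify(const char *name, const char **stored)
{
	*stored = name;
	if (strstr(name, "naa."))
		return "SAS";
	if (strstr(name, "fc.")) {
		*stored = name + 3;	/* skip over "fc." */
		return "FCP";
	}
	if (strstr(name, "0x")) {
		*stored = name + 2;	/* skip over "0x" */
		return "SRP";
	}
	if (strstr(name, "iqn."))
		return "iSCSI";
	return NULL;			/* -EINVAL in the driver */
}

int main(void)
{
	const char *names[] = {
		"naa.6001405aabbccdd00",
		"fc.20:00:00:11:22:33:44:55",
		"0x4049fd1900000001",
		"iqn.2003-01.org.linux-iscsi.tgt",
	};

	for (unsigned int i = 0; i < 4; i++) {
		const char *stored;
		const char *proto = classify(names[i], &stored);

		printf("%-35s -> %-5s stored \"%s\"\n",
		       names[i], proto ? proto : "?", stored);
	}
	return 0;
}
```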