author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit    76cb841cb886eef6b3bee341a2266c76578724ad
tree      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58  /drivers/infiniband/ulp/iser
parent    Initial commit.
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/ulp/iser')
-rw-r--r-- | drivers/infiniband/ulp/iser/Kconfig          |   12
-rw-r--r-- | drivers/infiniband/ulp/iser/Makefile         |    4
-rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.c     | 1122
-rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.h     |  716
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_initiator.c |  784
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_memory.c    |  579
-rw-r--r-- | drivers/infiniband/ulp/iser/iser_verbs.c     | 1174
7 files changed, 4391 insertions, 0 deletions
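
The receive path in iscsi_iser.c below validates the data length declared in each PDU header against the number of bytes actually received, tolerating up to four bytes of PDU pad. A minimal standalone sketch of that check: the ntoh24() decode of the 3-byte big-endian dlength field and the bounds test mirror iscsi_iser_recv(), while check_datalen(), main() and the sample values are illustrative only, not driver code:

#include <stdint.h>
#include <stdio.h>

/* decode the 3-byte big-endian dlength field of an iSCSI header */
static uint32_t ntoh24(const uint8_t dlength[3])
{
        return (dlength[0] << 16) | (dlength[1] << 8) | dlength[2];
}

/*
 * Mirror of the bounds test in iscsi_iser_recv(): the receive may carry
 * up to 4 bytes beyond dlength (pad), and never fewer bytes than dlength.
 */
static int check_datalen(const uint8_t dlength[3], int rx_data_len)
{
        int datalen = ntoh24(dlength);

        if (datalen > rx_data_len || (datalen + 4) < rx_data_len)
                return -1;      /* the driver raises ISCSI_ERR_DATALEN */
        return 0;
}

int main(void)
{
        const uint8_t dlength[3] = { 0x00, 0x00, 0x30 };        /* 48 bytes */

        printf("%d %d %d\n",
               check_datalen(dlength, 48),      /*  0: exact match  */
               check_datalen(dlength, 50),      /*  0: padded recv  */
               check_datalen(dlength, 64));     /* -1: length error */
        return 0;
}
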
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig new file mode 100644 index 000000000..d00af71a2 --- /dev/null +++ b/drivers/infiniband/ulp/iser/Kconfig @@ -0,0 +1,12 @@ +config INFINIBAND_ISER + tristate "iSCSI Extensions for RDMA (iSER)" + depends on SCSI && INET && INFINIBAND_ADDR_TRANS + select SCSI_ISCSI_ATTRS + ---help--- + Support for the iSCSI Extensions for RDMA (iSER) Protocol + over InfiniBand. This allows you to access storage devices + that speak iSCSI over iSER over InfiniBand. + + The iSER protocol is defined by IETF. + See <http://www.ietf.org/rfc/rfc5046.txt> + and <http://members.infinibandta.org/kwspub/spec/Annex_iSER.PDF> diff --git a/drivers/infiniband/ulp/iser/Makefile b/drivers/infiniband/ulp/iser/Makefile new file mode 100644 index 000000000..fe6cd15f2 --- /dev/null +++ b/drivers/infiniband/ulp/iser/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o + +ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \ + iscsi_iser.o diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c new file mode 100644 index 000000000..b4e0ae024 --- /dev/null +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -0,0 +1,1122 @@ +/* + * iSCSI Initiator over iSER Data-Path + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 Mike Christie + * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * maintained by openib-general@openib.org + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Credits: + * Christoph Hellwig + * FUJITA Tomonori + * Arne Redlich + * Zhenyu Wang + * Modified by: + * Erez Zilber + */ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/hardirq.h> +#include <linux/kfifo.h> +#include <linux/blkdev.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/in.h> +#include <linux/net.h> +#include <linux/scatterlist.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include <net/sock.h> + +#include <linux/uaccess.h> + +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi.h> +#include <scsi/scsi_transport_iscsi.h> + +#include "iscsi_iser.h" + +MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"); + +static struct scsi_host_template iscsi_iser_sht; +static struct iscsi_transport iscsi_iser_transport; +static struct scsi_transport_template *iscsi_iser_scsi_transport; +static struct workqueue_struct *release_wq; +static DEFINE_MUTEX(unbind_iser_conn_mutex); +struct iser_global ig; + +int iser_debug_level = 0; +module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); + +static unsigned int iscsi_max_lun = 512; +module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); +MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512"); + +unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; +module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); + +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); + +bool iser_pi_enable = false; +module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); +MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); + +int iser_pi_guard; +module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); +MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); + +/* + * iscsi_iser_recv() - Process a successful recv completion + * @conn: iscsi connection + * @hdr: iscsi header + * @rx_data: buffer containing receive data payload + * @rx_data_len: length of rx_data + * + * Notes: In case of data length errors or iscsi PDU completion failures + * this routine will signal iscsi layer of connection failure. 
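+ * A receive carrying up to 4 pad bytes beyond the header dlength is accepted.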
+ */ +void +iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *rx_data, int rx_data_len) +{ + int rc = 0; + int datalen; + + /* verify PDU length */ + datalen = ntoh24(hdr->dlength); + if (datalen > rx_data_len || (datalen + 4) < rx_data_len) { + iser_err("wrong datalen %d (hdr), %d (IB)\n", + datalen, rx_data_len); + rc = ISCSI_ERR_DATALEN; + goto error; + } + + if (datalen != rx_data_len) + iser_dbg("aligned datalen (%d) hdr, %d (IB)\n", + datalen, rx_data_len); + + rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len); + if (rc && rc != ISCSI_ERR_NO_SCSI_CMD) + goto error; + + return; +error: + iscsi_conn_failure(conn, rc); +} + +/** + * iscsi_iser_pdu_alloc() - allocate an iscsi-iser PDU + * @task: iscsi task + * @opcode: iscsi command opcode + * + * Netes: This routine can't fail, just assign iscsi task + * hdr and max hdr size. + */ +static int +iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + + task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header; + task->hdr_max = sizeof(iser_task->desc.iscsi_header); + + return 0; +} + +/** + * iser_initialize_task_headers() - Initialize task headers + * @task: iscsi task + * @tx_desc: iser tx descriptor + * + * Notes: + * This routine may race with iser teardown flow for scsi + * error handling TMFs. So for TMF we should acquire the + * state mutex to avoid dereferencing the IB device which + * may have already been terminated. + */ +int +iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc) +{ + struct iser_conn *iser_conn = task->conn->dd_data; + struct iser_device *device = iser_conn->ib_conn.device; + struct iscsi_iser_task *iser_task = task->dd_data; + u64 dma_addr; + const bool mgmt_task = !task->sc && !in_interrupt(); + int ret = 0; + + if (unlikely(mgmt_task)) + mutex_lock(&iser_conn->state_mutex); + + if (unlikely(iser_conn->state != ISER_CONN_UP)) { + ret = -ENODEV; + goto out; + } + + dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + if (ib_dma_mapping_error(device->ib_device, dma_addr)) { + ret = -ENOMEM; + goto out; + } + + tx_desc->wr_idx = 0; + tx_desc->mapped = true; + tx_desc->dma_addr = dma_addr; + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; + + iser_task->iser_conn = iser_conn; +out: + if (unlikely(mgmt_task)) + mutex_unlock(&iser_conn->state_mutex); + + return ret; +} + +/** + * iscsi_iser_task_init() - Initialize iscsi-iser task + * @task: iscsi task + * + * Initialize the task for the scsi command or mgmt command. + * + * Return: Returns zero on success or -ENOMEM when failing + * to init task headers (dma mapping error). + */ +static int +iscsi_iser_task_init(struct iscsi_task *task) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + int ret; + + ret = iser_initialize_task_headers(task, &iser_task->desc); + if (ret) { + iser_err("Failed to init task %p, err = %d\n", + iser_task, ret); + return ret; + } + + /* mgmt task */ + if (!task->sc) + return 0; + + iser_task->command_sent = 0; + iser_task_rdma_init(iser_task); + iser_task->sc = task->sc; + + return 0; +} + +/** + * iscsi_iser_mtask_xmit() - xmit management (immediate) task + * @conn: iscsi connection + * @task: task management task + * + * Notes: + * The function can return -EAGAIN in which case caller must + * call it again later, or recover. 
'0' return code means successful + * xmit. + * + **/ +static int +iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +{ + int error = 0; + + iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt); + + error = iser_send_control(conn, task); + + /* since iser xmits control with zero copy, tasks can not be recycled + * right after sending them. + * The recycling scheme is based on whether a response is expected + * - if yes, the task is recycled at iscsi_complete_pdu + * - if no, the task is recycled at iser_snd_completion + */ + return error; +} + +static int +iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iscsi_r2t_info *r2t = &task->unsol_r2t; + struct iscsi_data hdr; + int error = 0; + + /* Send data-out PDUs while there's still unsolicited data to send */ + while (iscsi_task_has_unsol_data(task)) { + iscsi_prep_data_out_pdu(task, r2t, &hdr); + iser_dbg("Sending data-out: itt 0x%x, data count %d\n", + hdr.itt, r2t->data_count); + + /* the buffer description has been passed with the command */ + /* Send the command */ + error = iser_send_data_out(conn, task, &hdr); + if (error) { + r2t->datasn--; + goto iscsi_iser_task_xmit_unsol_data_exit; + } + r2t->sent += r2t->data_count; + iser_dbg("Need to send %d more as data-out PDUs\n", + r2t->data_length - r2t->sent); + } + +iscsi_iser_task_xmit_unsol_data_exit: + return error; +} + +/** + * iscsi_iser_task_xmit() - xmit iscsi-iser task + * @task: iscsi task + * + * Return: zero on success or escalates $error on failure. + */ +static int +iscsi_iser_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_iser_task *iser_task = task->dd_data; + int error = 0; + + if (!task->sc) + return iscsi_iser_mtask_xmit(conn, task); + + if (task->sc->sc_data_direction == DMA_TO_DEVICE) { + BUG_ON(scsi_bufflen(task->sc) == 0); + + iser_dbg("cmd [itt %x total %d imm %d unsol_data %d\n", + task->itt, scsi_bufflen(task->sc), + task->imm_count, task->unsol_r2t.data_length); + } + + iser_dbg("ctask xmit [cid %d itt 0x%x]\n", + conn->id, task->itt); + + /* Send the cmd PDU */ + if (!iser_task->command_sent) { + error = iser_send_command(conn, task); + if (error) + goto iscsi_iser_task_xmit_exit; + iser_task->command_sent = 1; + } + + /* Send unsolicited data-out PDU(s) if necessary */ + if (iscsi_task_has_unsol_data(task)) + error = iscsi_iser_task_xmit_unsol_data(conn, task); + + iscsi_iser_task_xmit_exit: + return error; +} + +/** + * iscsi_iser_cleanup_task() - cleanup an iscsi-iser task + * @task: iscsi task + * + * Notes: In case the RDMA device is already NULL (might have + * been removed in DEVICE_REMOVAL CM event it will bail-out + * without doing dma unmapping. 
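+ * The tx_desc->mapped flag additionally protects against a double unmap.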
+ */ +static void iscsi_iser_cleanup_task(struct iscsi_task *task) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct iser_conn *iser_conn = task->conn->dd_data; + struct iser_device *device = iser_conn->ib_conn.device; + + /* DEVICE_REMOVAL event might have already released the device */ + if (!device) + return; + + if (likely(tx_desc->mapped)) { + ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + tx_desc->mapped = false; + } + + /* mgmt tasks do not need special cleanup */ + if (!task->sc) + return; + + if (iser_task->status == ISER_TASK_STATUS_STARTED) { + iser_task->status = ISER_TASK_STATUS_COMPLETED; + iser_task_rdma_finalize(iser_task); + } +} + +/** + * iscsi_iser_check_protection() - check protection information status of task. + * @task: iscsi task + * @sector: error sector if exsists (output) + * + * Return: zero if no data-integrity errors have occured + * 0x1: data-integrity error occured in the guard-block + * 0x2: data-integrity error occured in the reference tag + * 0x3: data-integrity error occured in the application tag + * + * In addition the error sector is marked. + */ +static u8 +iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + + if (iser_task->dir[ISER_DIR_IN]) + return iser_check_task_pi_status(iser_task, ISER_DIR_IN, + sector); + else + return iser_check_task_pi_status(iser_task, ISER_DIR_OUT, + sector); +} + +/** + * iscsi_iser_conn_create() - create a new iscsi-iser connection + * @cls_session: iscsi class connection + * @conn_idx: connection index within the session (for MCS) + * + * Return: iscsi_cls_conn when iscsi_conn_setup succeeds or NULL + * otherwise. + */ +static struct iscsi_cls_conn * +iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, + uint32_t conn_idx) +{ + struct iscsi_conn *conn; + struct iscsi_cls_conn *cls_conn; + + cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + + /* + * due to issues with the login code re iser sematics + * this not set in iscsi_conn_setup - FIXME + */ + conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN; + + return cls_conn; +} + +/** + * iscsi_iser_conn_bind() - bind iscsi and iser connection structures + * @cls_session: iscsi class session + * @cls_conn: iscsi class connection + * @transport_eph: transport end-point handle + * @is_leading: indicate if this is the session leading connection (MCS) + * + * Return: zero on success, $error if iscsi_conn_bind fails and + * -EINVAL in case end-point doesn't exsits anymore or iser connection + * state is not UP (teardown already started). 
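+ * The rx descriptors are allocated at bind time since their count derives
+ * from the session cmds_max.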
+ */ +static int +iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_eph, + int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iser_conn *iser_conn; + struct iscsi_endpoint *ep; + int error; + + error = iscsi_conn_bind(cls_session, cls_conn, is_leading); + if (error) + return error; + + /* the transport ep handle comes from user space so it must be + * verified against the global ib connections list */ + ep = iscsi_lookup_endpoint(transport_eph); + if (!ep) { + iser_err("can't bind eph %llx\n", + (unsigned long long)transport_eph); + return -EINVAL; + } + iser_conn = ep->dd_data; + + mutex_lock(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_UP) { + error = -EINVAL; + iser_err("iser_conn %p state is %d, teardown started\n", + iser_conn, iser_conn->state); + goto out; + } + + error = iser_alloc_rx_descriptors(iser_conn, conn->session); + if (error) + goto out; + + /* binds the iSER connection retrieved from the previously + * connected ep_handle to the iSCSI layer connection. exchanges + * connection pointers */ + iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn); + + conn->dd_data = iser_conn; + iser_conn->iscsi_conn = conn; + +out: + mutex_unlock(&iser_conn->state_mutex); + return error; +} + +/** + * iscsi_iser_conn_start() - start iscsi-iser connection + * @cls_conn: iscsi class connection + * + * Notes: Here iser intialize (or re-initialize) stop_completion as + * from this point iscsi must call conn_stop in session/connection + * teardown so iser transport must wait for it. + */ +static int +iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *iscsi_conn; + struct iser_conn *iser_conn; + + iscsi_conn = cls_conn->dd_data; + iser_conn = iscsi_conn->dd_data; + reinit_completion(&iser_conn->stop_completion); + + return iscsi_conn_start(cls_conn); +} + +/** + * iscsi_iser_conn_stop() - stop iscsi-iser connection + * @cls_conn: iscsi class connection + * @flag: indicate if recover or terminate (passed as is) + * + * Notes: Calling iscsi_conn_stop might theoretically race with + * DEVICE_REMOVAL event and dereference a previously freed RDMA device + * handle, so we call it under iser the state lock to protect against + * this kind of race. + */ +static void +iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iser_conn *iser_conn = conn->dd_data; + + iser_info("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn); + + /* + * Userspace may have goofed up and not bound the connection or + * might have only partially setup the connection. + */ + if (iser_conn) { + mutex_lock(&iser_conn->state_mutex); + mutex_lock(&unbind_iser_conn_mutex); + iser_conn_terminate(iser_conn); + iscsi_conn_stop(cls_conn, flag); + + /* unbind */ + iser_conn->iscsi_conn = NULL; + conn->dd_data = NULL; + mutex_unlock(&unbind_iser_conn_mutex); + + complete(&iser_conn->stop_completion); + mutex_unlock(&iser_conn->state_mutex); + } else { + iscsi_conn_stop(cls_conn, flag); + } +} + +/** + * iscsi_iser_session_destroy() - destroy iscsi-iser session + * @cls_session: iscsi class session + * + * Removes and free iscsi host. 
+ */ +static void +iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + + iscsi_session_teardown(cls_session); + iscsi_host_remove(shost); + iscsi_host_free(shost); +} + +static inline unsigned int +iser_dif_prot_caps(int prot_caps) +{ + return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? + SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION : 0) | + ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? + SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) | + ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0); +} + +/** + * iscsi_iser_session_create() - create an iscsi-iser session + * @ep: iscsi end-point handle + * @cmds_max: maximum commands in this session + * @qdepth: session command queue depth + * @initial_cmdsn: initiator command sequnce number + * + * Allocates and adds a scsi host, expose DIF supprot if + * exists, and sets up an iscsi session. + */ +static struct iscsi_cls_session * +iscsi_iser_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn) +{ + struct iscsi_cls_session *cls_session; + struct Scsi_Host *shost; + struct iser_conn *iser_conn = NULL; + struct ib_conn *ib_conn; + u32 max_fr_sectors; + + shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); + if (!shost) + return NULL; + shost->transportt = iscsi_iser_scsi_transport; + shost->cmd_per_lun = qdepth; + shost->max_lun = iscsi_max_lun; + shost->max_id = 0; + shost->max_channel = 0; + shost->max_cmd_len = 16; + + /* + * older userspace tools (before 2.0-870) did not pass us + * the leading conn's ep so this will be NULL; + */ + if (ep) { + iser_conn = ep->dd_data; + shost->sg_tablesize = iser_conn->scsi_sg_tablesize; + shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds); + + mutex_lock(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_UP) { + iser_err("iser conn %p already started teardown\n", + iser_conn); + mutex_unlock(&iser_conn->state_mutex); + goto free_host; + } + + ib_conn = &iser_conn->ib_conn; + if (ib_conn->pi_support) { + u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap; + + shost->sg_prot_tablesize = shost->sg_tablesize; + scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP | + SHOST_DIX_GUARD_CRC); + } + + if (iscsi_host_add(shost, + ib_conn->device->ib_device->dev.parent)) { + mutex_unlock(&iser_conn->state_mutex); + goto free_host; + } + mutex_unlock(&iser_conn->state_mutex); + } else { + shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX); + if (iscsi_host_add(shost, NULL)) + goto free_host; + } + + max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9; + shost->max_sectors = min(iser_max_sectors, max_fr_sectors); + + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", + iser_conn, shost->sg_tablesize, + shost->max_sectors); + + if (shost->max_sectors < iser_max_sectors) + iser_warn("max_sectors was reduced from %u to %u\n", + iser_max_sectors, shost->max_sectors); + + cls_session = iscsi_session_setup(&iscsi_iser_transport, shost, + shost->can_queue, 0, + sizeof(struct iscsi_iser_task), + initial_cmdsn, 0); + if (!cls_session) + goto remove_host; + + return cls_session; + +remove_host: + iscsi_host_remove(shost); +free_host: + iscsi_host_free(shost); + return NULL; +} + +static int +iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) +{ + 
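+	/* iSER supports neither digests nor markers; reject anything but None/No */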
int value; + + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + /* TBD */ + break; + case ISCSI_PARAM_HDRDGST_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("DataDigest wasn't negotiated to None\n"); + return -EPROTO; + } + break; + case ISCSI_PARAM_DATADGST_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("DataDigest wasn't negotiated to None\n"); + return -EPROTO; + } + break; + case ISCSI_PARAM_IFMARKER_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("IFMarker wasn't negotiated to No\n"); + return -EPROTO; + } + break; + case ISCSI_PARAM_OFMARKER_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("OFMarker wasn't negotiated to No\n"); + return -EPROTO; + } + break; + default: + return iscsi_set_param(cls_conn, param, buf, buflen); + } + + return 0; +} + +/** + * iscsi_iser_set_param() - set class connection parameter + * @cls_conn: iscsi class connection + * @stats: iscsi stats to output + * + * Output connection statistics. + */ +static void +iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; /* always 0 */ + stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */ + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->custom_length = 0; +} + +static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct iser_conn *iser_conn = ep->dd_data; + int len; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + if (!iser_conn || !iser_conn->ib_conn.cma_id) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &iser_conn->ib_conn.cma_id->route.addr.dst_addr, + param, buf); + break; + default: + return -ENOSYS; + } + + return len; +} + +/** + * iscsi_iser_ep_connect() - Initiate iSER connection establishment + * @shost: scsi_host + * @dst_addr: destination address + * @non-blocking: indicate if routine can block + * + * Allocate an iscsi endpoint, an iser_conn structure and bind them. + * After that start RDMA connection establishment via rdma_cm. We + * don't allocate iser_conn embedded in iscsi_endpoint since in teardown + * the endpoint will be destroyed at ep_disconnect while iser_conn will + * cleanup its resources asynchronuously. + * + * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error) + * if fails. + */ +static struct iscsi_endpoint * +iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) +{ + int err; + struct iser_conn *iser_conn; + struct iscsi_endpoint *ep; + + ep = iscsi_create_endpoint(0); + if (!ep) + return ERR_PTR(-ENOMEM); + + iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL); + if (!iser_conn) { + err = -ENOMEM; + goto failure; + } + + ep->dd_data = iser_conn; + iser_conn->ep = ep; + iser_conn_init(iser_conn); + + err = iser_connect(iser_conn, NULL, dst_addr, non_blocking); + if (err) + goto failure; + + return ep; +failure: + iscsi_destroy_endpoint(ep); + return ERR_PTR(err); +} + +/** + * iscsi_iser_ep_poll() - poll for iser connection establishment to complete + * @ep: iscsi endpoint (created at ep_connect) + * @timeout_ms: polling timeout allowed in ms. 
+ * + * This routine boils down to waiting for up_completion signaling + * that cma_id got CONNECTED event. + * + * Return: 1 if succeeded in connection establishment, 0 if timeout expired + * (libiscsi will retry will kick in) or -1 if interrupted by signal + * or more likely iser connection state transitioned to TEMINATING or + * DOWN during the wait period. + */ +static int +iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct iser_conn *iser_conn = ep->dd_data; + int rc; + + rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion, + msecs_to_jiffies(timeout_ms)); + /* if conn establishment failed, return error code to iscsi */ + if (rc == 0) { + mutex_lock(&iser_conn->state_mutex); + if (iser_conn->state == ISER_CONN_TERMINATING || + iser_conn->state == ISER_CONN_DOWN) + rc = -1; + mutex_unlock(&iser_conn->state_mutex); + } + + iser_info("iser conn %p rc = %d\n", iser_conn, rc); + + if (rc > 0) + return 1; /* success, this is the equivalent of EPOLLOUT */ + else if (!rc) + return 0; /* timeout */ + else + return rc; /* signal */ +} + +/** + * iscsi_iser_ep_disconnect() - Initiate connection teardown process + * @ep: iscsi endpoint handle + * + * This routine is not blocked by iser and RDMA termination process + * completion as we queue a deffered work for iser/RDMA destruction + * and cleanup or actually call it immediately in case we didn't pass + * iscsi conn bind/start stage, thus it is safe. + */ +static void +iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct iser_conn *iser_conn = ep->dd_data; + + iser_info("ep %p iser conn %p\n", ep, iser_conn); + + mutex_lock(&iser_conn->state_mutex); + iser_conn_terminate(iser_conn); + + /* + * if iser_conn and iscsi_conn are bound, we must wait for + * iscsi_conn_stop and flush errors completion before freeing + * the iser resources. Otherwise we are safe to free resources + * immediately. 
+ */ + if (iser_conn->iscsi_conn) { + INIT_WORK(&iser_conn->release_work, iser_release_work); + queue_work(release_wq, &iser_conn->release_work); + mutex_unlock(&iser_conn->state_mutex); + } else { + iser_conn->state = ISER_CONN_DOWN; + mutex_unlock(&iser_conn->state_mutex); + iser_conn_release(iser_conn); + } + + iscsi_destroy_endpoint(ep); +} + +static umode_t iser_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + case ISCSI_PARAM_DISCOVERY_SESS: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} + +static int iscsi_iser_slave_alloc(struct scsi_device *sdev) +{ + struct iscsi_session *session; + struct iser_conn *iser_conn; + struct ib_device *ib_dev; + + mutex_lock(&unbind_iser_conn_mutex); + + session = starget_to_session(scsi_target(sdev))->dd_data; + iser_conn = session->leadconn->dd_data; + if (!iser_conn) { + mutex_unlock(&unbind_iser_conn_mutex); + return -ENOTCONN; + } + ib_dev = iser_conn->ib_conn.device->ib_device; + + if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) + blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); + + mutex_unlock(&unbind_iser_conn_mutex); + + return 0; +} + +static struct scsi_host_template iscsi_iser_sht = { + .module = THIS_MODULE, + .name = "iSCSI Initiator over iSER", + .queuecommand = iscsi_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, + .cmd_per_lun = ISER_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler= iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .use_clustering = ENABLE_CLUSTERING, + .slave_alloc = iscsi_iser_slave_alloc, + .proc_name = "iscsi_iser", + .this_id = -1, + .track_queue_depth = 1, +}; + +static struct iscsi_transport iscsi_iser_transport = { + .owner = THIS_MODULE, + .name = "iser", + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO, + /* session management */ + .create_session = iscsi_iser_session_create, + .destroy_session = iscsi_iser_session_destroy, + /* connection management */ + .create_conn = iscsi_iser_conn_create, + .bind_conn = iscsi_iser_conn_bind, + .destroy_conn = iscsi_conn_teardown, + .attr_is_visible = iser_attr_is_visible, + .set_param = iscsi_iser_set_param, + 
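+	/* parameter retrieval */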
.get_conn_param = iscsi_conn_get_param, + .get_ep_param = iscsi_iser_get_ep_param, + .get_session_param = iscsi_session_get_param, + .start_conn = iscsi_iser_conn_start, + .stop_conn = iscsi_iser_conn_stop, + /* iscsi host params */ + .get_host_param = iscsi_host_get_param, + .set_host_param = iscsi_host_set_param, + /* IO */ + .send_pdu = iscsi_conn_send_pdu, + .get_stats = iscsi_iser_conn_get_stats, + .init_task = iscsi_iser_task_init, + .xmit_task = iscsi_iser_task_xmit, + .cleanup_task = iscsi_iser_cleanup_task, + .alloc_pdu = iscsi_iser_pdu_alloc, + .check_protection = iscsi_iser_check_protection, + /* recovery */ + .session_recovery_timedout = iscsi_session_recovery_timedout, + + .ep_connect = iscsi_iser_ep_connect, + .ep_poll = iscsi_iser_ep_poll, + .ep_disconnect = iscsi_iser_ep_disconnect +}; + +static int __init iser_init(void) +{ + int err; + + iser_dbg("Starting iSER datamover...\n"); + + if (iscsi_max_lun < 1) { + iser_err("Invalid max_lun value of %u\n", iscsi_max_lun); + return -EINVAL; + } + + memset(&ig, 0, sizeof(struct iser_global)); + + ig.desc_cache = kmem_cache_create("iser_descriptors", + sizeof(struct iser_tx_desc), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (ig.desc_cache == NULL) + return -ENOMEM; + + /* device init is called only after the first addr resolution */ + mutex_init(&ig.device_list_mutex); + INIT_LIST_HEAD(&ig.device_list); + mutex_init(&ig.connlist_mutex); + INIT_LIST_HEAD(&ig.connlist); + + release_wq = alloc_workqueue("release workqueue", 0, 0); + if (!release_wq) { + iser_err("failed to allocate release workqueue\n"); + err = -ENOMEM; + goto err_alloc_wq; + } + + iscsi_iser_scsi_transport = iscsi_register_transport( + &iscsi_iser_transport); + if (!iscsi_iser_scsi_transport) { + iser_err("iscsi_register_transport failed\n"); + err = -EINVAL; + goto err_reg; + } + + return 0; + +err_reg: + destroy_workqueue(release_wq); +err_alloc_wq: + kmem_cache_destroy(ig.desc_cache); + + return err; +} + +static void __exit iser_exit(void) +{ + struct iser_conn *iser_conn, *n; + int connlist_empty; + + iser_dbg("Removing iSER datamover...\n"); + destroy_workqueue(release_wq); + + mutex_lock(&ig.connlist_mutex); + connlist_empty = list_empty(&ig.connlist); + mutex_unlock(&ig.connlist_mutex); + + if (!connlist_empty) { + iser_err("Error cleanup stage completed but we still have iser " + "connections, destroying them anyway\n"); + list_for_each_entry_safe(iser_conn, n, &ig.connlist, + conn_list) { + iser_conn_release(iser_conn); + } + } + + iscsi_unregister_transport(&iscsi_iser_transport); + kmem_cache_destroy(ig.desc_cache); +} + +module_init(iser_init); +module_exit(iser_exit); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h new file mode 100644 index 000000000..a7aeaa0c6 --- /dev/null +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -0,0 +1,716 @@ +/* + * iSER transport for the Open iSCSI Initiator & iSER transport internals + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 Mike Christie + * based on code maintained by open-iscsi@googlegroups.com + * + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ISCSI_ISER_H__ +#define __ISCSI_ISER_H__ + +#include <linux/types.h> +#include <linux/net.h> +#include <linux/printk.h> +#include <scsi/libiscsi.h> +#include <scsi/scsi_transport_iscsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/iser.h> + +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/mutex.h> +#include <linux/mempool.h> +#include <linux/uio.h> + +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_fmr_pool.h> +#include <rdma/rdma_cm.h> + +#define DRV_NAME "iser" +#define PFX DRV_NAME ": " +#define DRV_VER "1.6" + +#define iser_dbg(fmt, arg...) \ + do { \ + if (unlikely(iser_debug_level > 2)) \ + printk(KERN_DEBUG PFX "%s: " fmt,\ + __func__ , ## arg); \ + } while (0) + +#define iser_warn(fmt, arg...) \ + do { \ + if (unlikely(iser_debug_level > 0)) \ + pr_warn(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define iser_info(fmt, arg...) \ + do { \ + if (unlikely(iser_debug_level > 1)) \ + pr_info(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define iser_err(fmt, arg...) 
\ + pr_err(PFX "%s: " fmt, __func__ , ## arg) + +#define SHIFT_4K 12 +#define SIZE_4K (1ULL << SHIFT_4K) +#define MASK_4K (~(SIZE_4K-1)) + +/* Default support is 512KB I/O size */ +#define ISER_DEF_MAX_SECTORS 1024 +#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K) +/* Maximum support is 8MB I/O size */ +#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K) + +#define ISER_DEF_XMIT_CMDS_DEFAULT 512 +#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT + #define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX +#else + #define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT +#endif +#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX + +/* QP settings */ +/* Maximal bounds on received asynchronous PDUs */ +#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */ + +#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * + * SCSI_TMFUNC(2), LOGOUT(1) */ + +#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX) + +#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2) + +/* the max TX (send) WR supported by the iSER QP is defined by * + * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * + * to have at max for SCSI command. The tx posting & completion handling code * + * supports -EAGAIN scheme where tx is suspended till the QP has room for more * + * send WR. D=8 comes from 64K/8K */ + +#define ISER_INFLIGHT_DATAOUTS 8 + +#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \ + (1 + ISER_INFLIGHT_DATAOUTS) + \ + ISER_MAX_TX_MISC_PDUS + \ + ISER_MAX_RX_MISC_PDUS) + +/* Max registration work requests per command */ +#define ISER_MAX_REG_WR_PER_CMD 5 + +/* For Signature we don't support DATAOUTs so no need to make room for them */ +#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \ + (1 + ISER_MAX_REG_WR_PER_CMD) + \ + ISER_MAX_TX_MISC_PDUS + \ + ISER_MAX_RX_MISC_PDUS) + +#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr \ + - ISER_MAX_TX_MISC_PDUS \ + - ISER_MAX_RX_MISC_PDUS) / \ + (1 + ISER_INFLIGHT_DATAOUTS)) + +#define ISER_SIGNAL_CMD_COUNT 32 + +/* Constant PDU lengths calculations */ +#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr)) + +#define ISER_RECV_DATA_SEG_LEN 128 +#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN) +#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN) + +/* Length of an object name string */ +#define ISER_OBJECT_NAME_SIZE 64 + +enum iser_conn_state { + ISER_CONN_INIT, /* descriptor allocd, no conn */ + ISER_CONN_PENDING, /* in the process of being established */ + ISER_CONN_UP, /* up and running */ + ISER_CONN_TERMINATING, /* in the process of being terminated */ + ISER_CONN_DOWN, /* shut down */ + ISER_CONN_STATES_NUM +}; + +enum iser_task_status { + ISER_TASK_STATUS_INIT = 0, + ISER_TASK_STATUS_STARTED, + ISER_TASK_STATUS_COMPLETED +}; + +enum iser_data_dir { + ISER_DIR_IN = 0, /* to initiator */ + ISER_DIR_OUT, /* from initiator */ + ISER_DIRS_NUM +}; + +/** + * struct iser_data_buf - iSER data buffer + * + * @sg: pointer to the sg list + * @size: num entries of this sg + * @data_len: total beffer byte len + * @dma_nents: returned by dma_map_sg + */ +struct iser_data_buf { + struct scatterlist *sg; + int size; + unsigned long data_len; + int dma_nents; +}; + +/* fwd declarations */ +struct iser_device; +struct iscsi_iser_task; +struct iscsi_endpoint; +struct iser_reg_resources; + +/** + * struct iser_mem_reg - iSER memory registration info + * + * @sge: memory region sg element + * @rkey: memory 
region remote key + * @mem_h: pointer to registration context (FMR/Fastreg) + */ +struct iser_mem_reg { + struct ib_sge sge; + u32 rkey; + void *mem_h; +}; + +enum iser_desc_type { + ISCSI_TX_CONTROL , + ISCSI_TX_SCSI_COMMAND, + ISCSI_TX_DATAOUT +}; + +/* Maximum number of work requests per task: + * Data memory region local invalidate + fast registration + * Protection memory region local invalidate + fast registration + * Signature memory region local invalidate + fast registration + * PDU send + */ +#define ISER_MAX_WRS 7 + +/** + * struct iser_tx_desc - iSER TX descriptor + * + * @iser_header: iser header + * @iscsi_header: iscsi header + * @type: command/control/dataout + * @dam_addr: header buffer dma_address + * @tx_sg: sg[0] points to iser/iscsi headers + * sg[1] optionally points to either of immediate data + * unsolicited data-out or control + * @num_sge: number sges used on this TX task + * @mapped: Is the task header mapped + * @wr_idx: Current WR index + * @wrs: Array of WRs per task + * @data_reg: Data buffer registration details + * @prot_reg: Protection buffer registration details + * @sig_attrs: Signature attributes + */ +struct iser_tx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; + enum iser_desc_type type; + u64 dma_addr; + struct ib_sge tx_sg[2]; + int num_sge; + struct ib_cqe cqe; + bool mapped; + u8 wr_idx; + union iser_wr { + struct ib_send_wr send; + struct ib_reg_wr fast_reg; + struct ib_sig_handover_wr sig; + } wrs[ISER_MAX_WRS]; + struct iser_mem_reg data_reg; + struct iser_mem_reg prot_reg; + struct ib_sig_attrs sig_attrs; +}; + +#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ + sizeof(u64) + sizeof(struct ib_sge) + \ + sizeof(struct ib_cqe))) +/** + * struct iser_rx_desc - iSER RX descriptor + * + * @iser_header: iser header + * @iscsi_header: iscsi header + * @data: received data segment + * @dma_addr: receive buffer dma address + * @rx_sg: ib_sge of receive buffer + * @pad: for sense data TODO: Modify to maximum sense length supported + */ +struct iser_rx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; + char data[ISER_RECV_DATA_SEG_LEN]; + u64 dma_addr; + struct ib_sge rx_sg; + struct ib_cqe cqe; + char pad[ISER_RX_PAD_SIZE]; +} __packed; + +/** + * struct iser_login_desc - iSER login descriptor + * + * @req: pointer to login request buffer + * @resp: pointer to login response buffer + * @req_dma: DMA address of login request buffer + * @rsp_dma: DMA address of login response buffer + * @sge: IB sge for login post recv + * @cqe: completion handler + */ +struct iser_login_desc { + void *req; + void *rsp; + u64 req_dma; + u64 rsp_dma; + struct ib_sge sge; + struct ib_cqe cqe; +} __attribute__((packed)); + +struct iser_conn; +struct ib_conn; +struct iscsi_iser_task; + +/** + * struct iser_comp - iSER completion context + * + * @cq: completion queue + * @active_qps: Number of active QPs attached + * to completion context + */ +struct iser_comp { + struct ib_cq *cq; + int active_qps; +}; + +/** + * struct iser_device - Memory registration operations + * per-device registration schemes + * + * @alloc_reg_res: Allocate registration resources + * @free_reg_res: Free registration resources + * @fast_reg_mem: Register memory buffers + * @unreg_mem: Un-register memory buffers + * @reg_desc_get: Get a registration descriptor for pool + * @reg_desc_put: Get a registration descriptor to pool + */ +struct iser_reg_ops { + int (*alloc_reg_res)(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); + 
void (*free_reg_res)(struct ib_conn *ib_conn); + int (*reg_mem)(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg); + void (*unreg_mem)(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); + struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn); + void (*reg_desc_put)(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); +}; + +/** + * struct iser_device - iSER device handle + * + * @ib_device: RDMA device + * @pd: Protection Domain for this device + * @mr: Global DMA memory region + * @event_handler: IB events handle routine + * @ig_list: entry in devices list + * @refcount: Reference counter, dominated by open iser connections + * @comps_used: Number of completion contexts used, Min between online + * cpus and device max completion vectors + * @comps: Dinamically allocated array of completion handlers + * @reg_ops: Registration ops + * @remote_inv_sup: Remote invalidate is supported on this device + */ +struct iser_device { + struct ib_device *ib_device; + struct ib_pd *pd; + struct ib_event_handler event_handler; + struct list_head ig_list; + int refcount; + int comps_used; + struct iser_comp *comps; + const struct iser_reg_ops *reg_ops; + bool remote_inv_sup; +}; + +/** + * struct iser_reg_resources - Fast registration recources + * + * @mr: memory region + * @fmr_pool: pool of fmrs + * @page_vec: fast reg page list used by fmr pool + * @mr_valid: is mr valid indicator + */ +struct iser_reg_resources { + union { + struct ib_mr *mr; + struct ib_fmr_pool *fmr_pool; + }; + struct iser_page_vec *page_vec; + u8 mr_valid:1; +}; + +/** + * struct iser_pi_context - Protection information context + * + * @rsc: protection buffer registration resources + * @sig_mr: signature enable memory region + * @sig_mr_valid: is sig_mr valid indicator + * @sig_protected: is region protected indicator + */ +struct iser_pi_context { + struct iser_reg_resources rsc; + struct ib_mr *sig_mr; + u8 sig_mr_valid:1; + u8 sig_protected:1; +}; + +/** + * struct iser_fr_desc - Fast registration descriptor + * + * @list: entry in connection fastreg pool + * @rsc: data buffer registration resources + * @pi_ctx: protection information context + */ +struct iser_fr_desc { + struct list_head list; + struct iser_reg_resources rsc; + struct iser_pi_context *pi_ctx; + struct list_head all_list; +}; + +/** + * struct iser_fr_pool: connection fast registration pool + * + * @list: list of fastreg descriptors + * @lock: protects fmr/fastreg pool + * @size: size of the pool + */ +struct iser_fr_pool { + struct list_head list; + spinlock_t lock; + int size; + struct list_head all_list; +}; + +/** + * struct ib_conn - Infiniband related objects + * + * @cma_id: rdma_cm connection maneger handle + * @qp: Connection Queue-pair + * @post_recv_buf_count: post receive counter + * @sig_count: send work request signal count + * @rx_wr: receive work request for batch posts + * @device: reference to iser device + * @comp: iser completion context + * @fr_pool: connection fast registration poool + * @pi_support: Indicate device T10-PI support + */ +struct ib_conn { + struct rdma_cm_id *cma_id; + struct ib_qp *qp; + int post_recv_buf_count; + u8 sig_count; + struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; + struct iser_device *device; + struct iser_comp *comp; + struct iser_fr_pool fr_pool; + bool pi_support; + struct ib_cqe reg_cqe; +}; + +/** + * struct iser_conn - iSER connection context + * + * @ib_conn: connection RDMA resources + * @iscsi_conn: 
link to matching iscsi connection + * @ep: transport handle + * @state: connection logical state + * @qp_max_recv_dtos: maximum number of data outs, corresponds + * to max number of post recvs + * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1) + * @min_posted_rx: (qp_max_recv_dtos >> 2) + * @max_cmds: maximum cmds allowed for this connection + * @name: connection peer portal + * @release_work: deffered work for release job + * @state_mutex: protects iser onnection state + * @stop_completion: conn_stop completion + * @ib_completion: RDMA cleanup completion + * @up_completion: connection establishment completed + * (state is ISER_CONN_UP) + * @conn_list: entry in ig conn list + * @login_desc: login descriptor + * @rx_desc_head: head of rx_descs cyclic buffer + * @rx_descs: rx buffers array (cyclic buffer) + * @num_rx_descs: number of rx descriptors + * @scsi_sg_tablesize: scsi host sg_tablesize + * @pages_per_mr: maximum pages available for registration + */ +struct iser_conn { + struct ib_conn ib_conn; + struct iscsi_conn *iscsi_conn; + struct iscsi_endpoint *ep; + enum iser_conn_state state; + unsigned qp_max_recv_dtos; + unsigned qp_max_recv_dtos_mask; + unsigned min_posted_rx; + u16 max_cmds; + char name[ISER_OBJECT_NAME_SIZE]; + struct work_struct release_work; + struct mutex state_mutex; + struct completion stop_completion; + struct completion ib_completion; + struct completion up_completion; + struct list_head conn_list; + struct iser_login_desc login_desc; + unsigned int rx_desc_head; + struct iser_rx_desc *rx_descs; + u32 num_rx_descs; + unsigned short scsi_sg_tablesize; + unsigned short pages_per_mr; + bool snd_w_inv; +}; + +/** + * struct iscsi_iser_task - iser task context + * + * @desc: TX descriptor + * @iser_conn: link to iser connection + * @status: current task status + * @sc: link to scsi command + * @command_sent: indicate if command was sent + * @dir: iser data direction + * @rdma_reg: task rdma registration desc + * @data: iser data buffer desc + * @prot: iser protection buffer desc + */ +struct iscsi_iser_task { + struct iser_tx_desc desc; + struct iser_conn *iser_conn; + enum iser_task_status status; + struct scsi_cmnd *sc; + int command_sent; + int dir[ISER_DIRS_NUM]; + struct iser_mem_reg rdma_reg[ISER_DIRS_NUM]; + struct iser_data_buf data[ISER_DIRS_NUM]; + struct iser_data_buf prot[ISER_DIRS_NUM]; +}; + +struct iser_page_vec { + u64 *pages; + int npages; + struct ib_mr fake_mr; +}; + +/** + * struct iser_global: iSER global context + * + * @device_list_mutex: protects device_list + * @device_list: iser devices global list + * @connlist_mutex: protects connlist + * @connlist: iser connections global list + * @desc_cache: kmem cache for tx dataout + */ +struct iser_global { + struct mutex device_list_mutex; + struct list_head device_list; + struct mutex connlist_mutex; + struct list_head connlist; + struct kmem_cache *desc_cache; +}; + +extern struct iser_global ig; +extern int iser_debug_level; +extern bool iser_pi_enable; +extern int iser_pi_guard; +extern unsigned int iser_max_sectors; +extern bool iser_always_reg; + +int iser_assign_reg_ops(struct iser_device *device); + +int iser_send_control(struct iscsi_conn *conn, + struct iscsi_task *task); + +int iser_send_command(struct iscsi_conn *conn, + struct iscsi_task *task); + +int iser_send_data_out(struct iscsi_conn *conn, + struct iscsi_task *task, + struct iscsi_data *hdr); + +void iscsi_iser_recv(struct iscsi_conn *conn, + struct iscsi_hdr *hdr, + char *rx_data, + int rx_data_len); + +void iser_conn_init(struct 
iser_conn *iser_conn); + +void iser_conn_release(struct iser_conn *iser_conn); + +int iser_conn_terminate(struct iser_conn *iser_conn); + +void iser_release_work(struct work_struct *work); + +void iser_err_comp(struct ib_wc *wc, const char *type); +void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc); +void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc); +void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc); +void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc); +void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc); +void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc); + +void iser_task_rdma_init(struct iscsi_iser_task *task); + +void iser_task_rdma_finalize(struct iscsi_iser_task *task); + +void iser_free_rx_descriptors(struct iser_conn *iser_conn); + +void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + enum iser_data_dir cmd_dir); + +int iser_reg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir, + bool all_imm); +void iser_unreg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir); + +int iser_connect(struct iser_conn *iser_conn, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, + int non_blocking); + +void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); +void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); + +int iser_post_recvl(struct iser_conn *iser_conn); +int iser_post_recvm(struct iser_conn *iser_conn, int count); +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, + bool signal); + +int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir); + +void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum dma_data_direction dir); + +int iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc); +int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, + struct iscsi_session *session); +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); +void iser_free_fmr_pool(struct ib_conn *ib_conn); +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); +void iser_free_fastreg_pool(struct ib_conn *ib_conn); +u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir, sector_t *sector); +struct iser_fr_desc * +iser_reg_desc_get_fr(struct ib_conn *ib_conn); +void +iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); +struct iser_fr_desc * +iser_reg_desc_get_fmr(struct ib_conn *ib_conn); +void +iser_reg_desc_put_fmr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); + +static inline struct ib_send_wr * +iser_tx_next_wr(struct iser_tx_desc *tx_desc) +{ + struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send; + struct ib_send_wr *last_wr; + + if (tx_desc->wr_idx) { + last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send; + last_wr->next = cur_wr; + } + tx_desc->wr_idx++; + + return cur_wr; +} + +static inline struct iser_conn * +to_iser_conn(struct ib_conn *ib_conn) +{ + return container_of(ib_conn, struct iser_conn, ib_conn); +} + +static inline struct iser_rx_desc * +iser_rx(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_rx_desc, cqe); +} + +static inline struct iser_tx_desc * +iser_tx(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_tx_desc, cqe); +} + 
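+/*
+ * Illustrative note (not part of the upstream patch): the iser_rx(),
+ * iser_tx() and iser_login() helpers recover the enclosing descriptor
+ * from its embedded ib_cqe, so a completion handler can go from the
+ * ib_wc back to the full descriptor, e.g.:
+ *
+ *	void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
+ *	{
+ *		struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
+ *		...
+ *	}
+ */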
+static inline struct iser_login_desc * +iser_login(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_login_desc, cqe); +} + +#endif diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c new file mode 100644 index 000000000..96af06cfe --- /dev/null +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -0,0 +1,784 @@ +/* + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/kfifo.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> + +#include "iscsi_iser.h" + +/* Register user buffer memory and initialize passive rdma + * dto descriptor. Data size is stored in + * task->data[ISER_DIR_IN].data_len, Protection size + * os stored in task->prot[ISER_DIR_IN].data_len + */ +static int iser_prepare_read_cmd(struct iscsi_task *task) + +{ + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_mem_reg *mem_reg; + int err; + struct iser_ctrl *hdr = &iser_task->desc.iser_header; + struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN]; + + err = iser_dma_map_task_data(iser_task, + buf_in, + ISER_DIR_IN, + DMA_FROM_DEVICE); + if (err) + return err; + + if (scsi_prot_sg_count(iser_task->sc)) { + struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN]; + + err = iser_dma_map_task_data(iser_task, + pbuf_in, + ISER_DIR_IN, + DMA_FROM_DEVICE); + if (err) + return err; + } + + err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false); + if (err) { + iser_err("Failed to set up Data-IN RDMA\n"); + return err; + } + mem_reg = &iser_task->rdma_reg[ISER_DIR_IN]; + + hdr->flags |= ISER_RSV; + hdr->read_stag = cpu_to_be32(mem_reg->rkey); + hdr->read_va = cpu_to_be64(mem_reg->sge.addr); + + iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n", + task->itt, mem_reg->rkey, + (unsigned long long)mem_reg->sge.addr); + + return 0; +} + +/* Register user buffer memory and initialize passive rdma + * dto descriptor. 
Data size is stored in + * task->data[ISER_DIR_OUT].data_len, Protection size + * is stored at task->prot[ISER_DIR_OUT].data_len + */ +static int +iser_prepare_write_cmd(struct iscsi_task *task, + unsigned int imm_sz, + unsigned int unsol_sz, + unsigned int edtl) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_mem_reg *mem_reg; + int err; + struct iser_ctrl *hdr = &iser_task->desc.iser_header; + struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; + struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1]; + + err = iser_dma_map_task_data(iser_task, + buf_out, + ISER_DIR_OUT, + DMA_TO_DEVICE); + if (err) + return err; + + if (scsi_prot_sg_count(iser_task->sc)) { + struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT]; + + err = iser_dma_map_task_data(iser_task, + pbuf_out, + ISER_DIR_OUT, + DMA_TO_DEVICE); + if (err) + return err; + } + + err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT, + buf_out->data_len == imm_sz); + if (err != 0) { + iser_err("Failed to register write cmd RDMA mem\n"); + return err; + } + + mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; + + if (unsol_sz < edtl) { + hdr->flags |= ISER_WSV; + if (buf_out->data_len > imm_sz) { + hdr->write_stag = cpu_to_be32(mem_reg->rkey); + hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz); + } + + iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n", + task->itt, mem_reg->rkey, + (unsigned long long)mem_reg->sge.addr, unsol_sz); + } + + if (imm_sz > 0) { + iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", + task->itt, imm_sz); + tx_dsg->addr = mem_reg->sge.addr; + tx_dsg->length = imm_sz; + tx_dsg->lkey = mem_reg->sge.lkey; + iser_task->desc.num_sge = 2; + } + + return 0; +} + +/* creates a new tx descriptor and adds header regd buffer */ +static void iser_create_send_desc(struct iser_conn *iser_conn, + struct iser_tx_desc *tx_desc) +{ + struct iser_device *device = iser_conn->ib_conn.device; + + ib_dma_sync_single_for_cpu(device->ib_device, + tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); + + memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); + tx_desc->iser_header.flags = ISER_VER; + tx_desc->num_sge = 1; +} + +static void iser_free_login_buf(struct iser_conn *iser_conn) +{ + struct iser_device *device = iser_conn->ib_conn.device; + struct iser_login_desc *desc = &iser_conn->login_desc; + + if (!desc->req) + return; + + ib_dma_unmap_single(device->ib_device, desc->req_dma, + ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); + + ib_dma_unmap_single(device->ib_device, desc->rsp_dma, + ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); + + kfree(desc->req); + kfree(desc->rsp); + + /* make sure we never redo any unmapping */ + desc->req = NULL; + desc->rsp = NULL; +} + +static int iser_alloc_login_buf(struct iser_conn *iser_conn) +{ + struct iser_device *device = iser_conn->ib_conn.device; + struct iser_login_desc *desc = &iser_conn->login_desc; + + desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); + if (!desc->req) + return -ENOMEM; + + desc->req_dma = ib_dma_map_single(device->ib_device, desc->req, + ISCSI_DEF_MAX_RECV_SEG_LEN, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(device->ib_device, + desc->req_dma)) + goto free_req; + + desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); + if (!desc->rsp) + goto unmap_req; + + desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp, + ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + if (ib_dma_mapping_error(device->ib_device, + desc->rsp_dma)) + goto free_rsp; + + return 0; + +free_rsp: + kfree(desc->rsp); +unmap_req: + 
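	/*
	 * Note: these error labels unwind in exact reverse order of setup --
	 * free the rsp buffer, undo the req DMA mapping, then free the req
	 * buffer -- the usual kernel goto-unwind convention.
	 */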
ib_dma_unmap_single(device->ib_device, desc->req_dma, + ISCSI_DEF_MAX_RECV_SEG_LEN, + DMA_TO_DEVICE); +free_req: + kfree(desc->req); + + return -ENOMEM; +} + +int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, + struct iscsi_session *session) +{ + int i, j; + u64 dma_addr; + struct iser_rx_desc *rx_desc; + struct ib_sge *rx_sg; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + iser_conn->qp_max_recv_dtos = session->cmds_max; + iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ + iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; + + if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max, + iser_conn->pages_per_mr)) + goto create_rdma_reg_res_failed; + + if (iser_alloc_login_buf(iser_conn)) + goto alloc_login_buf_fail; + + iser_conn->num_rx_descs = session->cmds_max; + iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs, + sizeof(struct iser_rx_desc), + GFP_KERNEL); + if (!iser_conn->rx_descs) + goto rx_desc_alloc_fail; + + rx_desc = iser_conn->rx_descs; + + for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) { + dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(device->ib_device, dma_addr)) + goto rx_desc_dma_map_failed; + + rx_desc->dma_addr = dma_addr; + rx_desc->cqe.done = iser_task_rsp; + rx_sg = &rx_desc->rx_sg; + rx_sg->addr = rx_desc->dma_addr; + rx_sg->length = ISER_RX_PAYLOAD_SIZE; + rx_sg->lkey = device->pd->local_dma_lkey; + } + + iser_conn->rx_desc_head = 0; + return 0; + +rx_desc_dma_map_failed: + rx_desc = iser_conn->rx_descs; + for (j = 0; j < i; j++, rx_desc++) + ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + kfree(iser_conn->rx_descs); + iser_conn->rx_descs = NULL; +rx_desc_alloc_fail: + iser_free_login_buf(iser_conn); +alloc_login_buf_fail: + device->reg_ops->free_reg_res(ib_conn); +create_rdma_reg_res_failed: + iser_err("failed allocating rx descriptors / data buffers\n"); + return -ENOMEM; +} + +void iser_free_rx_descriptors(struct iser_conn *iser_conn) +{ + int i; + struct iser_rx_desc *rx_desc; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + if (device->reg_ops->free_reg_res) + device->reg_ops->free_reg_res(ib_conn); + + rx_desc = iser_conn->rx_descs; + for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) + ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + kfree(iser_conn->rx_descs); + /* make sure we never redo any unmapping */ + iser_conn->rx_descs = NULL; + + iser_free_login_buf(iser_conn); +} + +static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iscsi_session *session = conn->session; + + iser_dbg("req op %x flags %x\n", req->opcode, req->flags); + /* check if this is the last login - going to full feature phase */ + if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) + return 0; + + /* + * Check that there is one posted recv buffer + * (for the last login response). 
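 *
 * Note on the receive ring built in iser_alloc_rx_descriptors() above:
 * session->cmds_max must be a power of two, so the ring head can advance
 * with a cheap mask instead of a modulo. With an illustrative
 * cmds_max of 64:
 *   qp_max_recv_dtos_mask = 64 - 1 = 0x3f
 *   min_posted_rx         = 64 >> 2 = 16   (the repost watermark)
 *   next head             = (head + 1) & 0x3f, so index 63 wraps to 0.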
+ */ + WARN_ON(ib_conn->post_recv_buf_count != 1); + + if (session->discovery_sess) { + iser_info("Discovery session, re-using login RX buffer\n"); + return 0; + } else + iser_info("Normal session, posting batch of RX %d buffers\n", + iser_conn->min_posted_rx); + + /* Initial post receive buffers */ + if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx)) + return -ENOMEM; + + return 0; +} + +static inline bool iser_signal_comp(u8 sig_count) +{ + return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0); +} + +/** + * iser_send_command - send command PDU + */ +int iser_send_command(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + unsigned long edtl; + int err; + struct iser_data_buf *data_buf, *prot_buf; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; + struct scsi_cmnd *sc = task->sc; + struct iser_tx_desc *tx_desc = &iser_task->desc; + u8 sig_count = ++iser_conn->ib_conn.sig_count; + + edtl = ntohl(hdr->data_length); + + /* build the tx desc regd header and add it to the tx desc dto */ + tx_desc->type = ISCSI_TX_SCSI_COMMAND; + tx_desc->cqe.done = iser_cmd_comp; + iser_create_send_desc(iser_conn, tx_desc); + + if (hdr->flags & ISCSI_FLAG_CMD_READ) { + data_buf = &iser_task->data[ISER_DIR_IN]; + prot_buf = &iser_task->prot[ISER_DIR_IN]; + } else { + data_buf = &iser_task->data[ISER_DIR_OUT]; + prot_buf = &iser_task->prot[ISER_DIR_OUT]; + } + + if (scsi_sg_count(sc)) { /* using a scatter list */ + data_buf->sg = scsi_sglist(sc); + data_buf->size = scsi_sg_count(sc); + } + data_buf->data_len = scsi_bufflen(sc); + + if (scsi_prot_sg_count(sc)) { + prot_buf->sg = scsi_prot_sglist(sc); + prot_buf->size = scsi_prot_sg_count(sc); + prot_buf->data_len = (data_buf->data_len >> + ilog2(sc->device->sector_size)) * 8; + } + + if (hdr->flags & ISCSI_FLAG_CMD_READ) { + err = iser_prepare_read_cmd(task); + if (err) + goto send_command_error; + } + if (hdr->flags & ISCSI_FLAG_CMD_WRITE) { + err = iser_prepare_write_cmd(task, + task->imm_count, + task->imm_count + + task->unsol_r2t.data_length, + edtl); + if (err) + goto send_command_error; + } + + iser_task->status = ISER_TASK_STATUS_STARTED; + + err = iser_post_send(&iser_conn->ib_conn, tx_desc, + iser_signal_comp(sig_count)); + if (!err) + return 0; + +send_command_error: + iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); + return err; +} + +/** + * iser_send_data_out - send data out PDU + */ +int iser_send_data_out(struct iscsi_conn *conn, + struct iscsi_task *task, + struct iscsi_data *hdr) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_tx_desc *tx_desc; + struct iser_mem_reg *mem_reg; + unsigned long buf_offset; + unsigned long data_seg_len; + uint32_t itt; + int err; + struct ib_sge *tx_dsg; + + itt = (__force uint32_t)hdr->itt; + data_seg_len = ntoh24(hdr->dlength); + buf_offset = ntohl(hdr->offset); + + iser_dbg("%s itt %d dseg_len %d offset %d\n", + __func__,(int)itt,(int)data_seg_len,(int)buf_offset); + + tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC); + if (!tx_desc) + return -ENOMEM; + + tx_desc->type = ISCSI_TX_DATAOUT; + tx_desc->cqe.done = iser_dataout_comp; + tx_desc->iser_header.flags = ISER_VER; + memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); + + /* build the tx desc */ + err = iser_initialize_task_headers(task, tx_desc); + if (err) + goto send_data_out_error; + + mem_reg = 
&iser_task->rdma_reg[ISER_DIR_OUT]; + tx_dsg = &tx_desc->tx_sg[1]; + tx_dsg->addr = mem_reg->sge.addr + buf_offset; + tx_dsg->length = data_seg_len; + tx_dsg->lkey = mem_reg->sge.lkey; + tx_desc->num_sge = 2; + + if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { + iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n", + buf_offset, data_seg_len, + iser_task->data[ISER_DIR_OUT].data_len, itt); + err = -EINVAL; + goto send_data_out_error; + } + iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n", + itt, buf_offset, data_seg_len); + + + err = iser_post_send(&iser_conn->ib_conn, tx_desc, true); + if (!err) + return 0; + +send_data_out_error: + kmem_cache_free(ig.desc_cache, tx_desc); + iser_err("conn %p failed err %d\n", conn, err); + return err; +} + +int iser_send_control(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_tx_desc *mdesc = &iser_task->desc; + unsigned long data_seg_len; + int err = 0; + struct iser_device *device; + + /* build the tx desc regd header and add it to the tx desc dto */ + mdesc->type = ISCSI_TX_CONTROL; + mdesc->cqe.done = iser_ctrl_comp; + iser_create_send_desc(iser_conn, mdesc); + + device = iser_conn->ib_conn.device; + + data_seg_len = ntoh24(task->hdr->dlength); + + if (data_seg_len > 0) { + struct iser_login_desc *desc = &iser_conn->login_desc; + struct ib_sge *tx_dsg = &mdesc->tx_sg[1]; + + if (task != conn->login_task) { + iser_err("data present on non login task!!!\n"); + goto send_control_error; + } + + ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma, + task->data_count, DMA_TO_DEVICE); + + memcpy(desc->req, task->data, task->data_count); + + ib_dma_sync_single_for_device(device->ib_device, desc->req_dma, + task->data_count, DMA_TO_DEVICE); + + tx_dsg->addr = desc->req_dma; + tx_dsg->length = task->data_count; + tx_dsg->lkey = device->pd->local_dma_lkey; + mdesc->num_sge = 2; + } + + if (task == conn->login_task) { + iser_dbg("op %x dsl %lx, posting login rx buffer\n", + task->hdr->opcode, data_seg_len); + err = iser_post_recvl(iser_conn); + if (err) + goto send_control_error; + err = iser_post_rx_bufs(conn, task->hdr); + if (err) + goto send_control_error; + } + + err = iser_post_send(&iser_conn->ib_conn, mdesc, true); + if (!err) + return 0; + +send_control_error: + iser_err("conn %p failed err %d\n",conn, err); + return err; +} + +void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct ib_conn *ib_conn = wc->qp->qp_context; + struct iser_conn *iser_conn = to_iser_conn(ib_conn); + struct iser_login_desc *desc = iser_login(wc->wr_cqe); + struct iscsi_hdr *hdr; + char *data; + int length; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + iser_err_comp(wc, "login_rsp"); + return; + } + + ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, + desc->rsp_dma, ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + + hdr = desc->rsp + sizeof(struct iser_ctrl); + data = desc->rsp + ISER_HEADERS_LEN; + length = wc->byte_len - ISER_HEADERS_LEN; + + iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, + hdr->itt, length); + + iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length); + + ib_dma_sync_single_for_device(ib_conn->device->ib_device, + desc->rsp_dma, ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + + ib_conn->post_recv_buf_count--; +} + +static inline int +iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) +{ + if (likely(rkey == desc->rsc.mr->rkey)) { + desc->rsc.mr_valid = 
0; + } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) { + desc->pi_ctx->sig_mr_valid = 0; + } else { + iser_err("Bogus remote invalidation for rkey %#x\n", rkey); + return -EINVAL; + } + + return 0; +} + +static int +iser_check_remote_inv(struct iser_conn *iser_conn, + struct ib_wc *wc, + struct iscsi_hdr *hdr) +{ + if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { + struct iscsi_task *task; + u32 rkey = wc->ex.invalidate_rkey; + + iser_dbg("conn %p: remote invalidation for rkey %#x\n", + iser_conn, rkey); + + if (unlikely(!iser_conn->snd_w_inv)) { + iser_err("conn %p: unexpected remote invalidation, terminating connection\n", + iser_conn); + return -EPROTO; + } + + task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt); + if (likely(task)) { + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_fr_desc *desc; + + if (iser_task->dir[ISER_DIR_IN]) { + desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h; + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; + } + + if (iser_task->dir[ISER_DIR_OUT]) { + desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h; + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; + } + } else { + iser_err("failed to get task for itt=%d\n", hdr->itt); + return -EINVAL; + } + } + + return 0; +} + + +void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct ib_conn *ib_conn = wc->qp->qp_context; + struct iser_conn *iser_conn = to_iser_conn(ib_conn); + struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); + struct iscsi_hdr *hdr; + int length; + int outstanding, count, err; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + iser_err_comp(wc, "task_rsp"); + return; + } + + ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, + desc->dma_addr, ISER_RX_PAYLOAD_SIZE, + DMA_FROM_DEVICE); + + hdr = &desc->iscsi_header; + length = wc->byte_len - ISER_HEADERS_LEN; + + iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, + hdr->itt, length); + + if (iser_check_remote_inv(iser_conn, wc, hdr)) { + iscsi_conn_failure(iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + return; + } + + iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length); + + ib_dma_sync_single_for_device(ib_conn->device->ib_device, + desc->dma_addr, ISER_RX_PAYLOAD_SIZE, + DMA_FROM_DEVICE); + + /* decrementing conn->post_recv_buf_count only --after-- freeing the * + * task eliminates the need to worry on tasks which are completed in * + * parallel to the execution of iser_conn_term. 
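 * (Aside on iser_check_remote_inv() above: a completion flagged
 * IB_WC_WITH_INVALIDATE is only legal when snd_w_inv was negotiated, and
 * the invalidated rkey must match the task's data-side or protection-side
 * MR; any other rkey is a protocol violation and fails the connection.)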
So the code that waits * + * for the posted rx bufs refcount to become zero handles everything */ + ib_conn->post_recv_buf_count--; + + outstanding = ib_conn->post_recv_buf_count; + if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) { + count = min(iser_conn->qp_max_recv_dtos - outstanding, + iser_conn->min_posted_rx); + err = iser_post_recvm(iser_conn, count); + if (err) + iser_err("posting %d rx bufs err %d\n", count, err); + } +} + +void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + iser_err_comp(wc, "command"); +} + +void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); + struct iscsi_task *task; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + iser_err_comp(wc, "control"); + return; + } + + /* this arithmetic is legal by libiscsi dd_data allocation */ + task = (void *)desc - sizeof(struct iscsi_task); + if (task->hdr->itt == RESERVED_ITT) + iscsi_put_task(task); +} + +void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); + struct ib_conn *ib_conn = wc->qp->qp_context; + struct iser_device *device = ib_conn->device; + + if (unlikely(wc->status != IB_WC_SUCCESS)) + iser_err_comp(wc, "dataout"); + + ib_dma_unmap_single(device->ib_device, desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + kmem_cache_free(ig.desc_cache, desc); +} + +void iser_task_rdma_init(struct iscsi_iser_task *iser_task) + +{ + iser_task->status = ISER_TASK_STATUS_INIT; + + iser_task->dir[ISER_DIR_IN] = 0; + iser_task->dir[ISER_DIR_OUT] = 0; + + iser_task->data[ISER_DIR_IN].data_len = 0; + iser_task->data[ISER_DIR_OUT].data_len = 0; + + iser_task->prot[ISER_DIR_IN].data_len = 0; + iser_task->prot[ISER_DIR_OUT].data_len = 0; + + memset(&iser_task->rdma_reg[ISER_DIR_IN], 0, + sizeof(struct iser_mem_reg)); + memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0, + sizeof(struct iser_mem_reg)); +} + +void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) +{ + int prot_count = scsi_prot_sg_count(iser_task->sc); + + if (iser_task->dir[ISER_DIR_IN]) { + iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); + iser_dma_unmap_task_data(iser_task, + &iser_task->data[ISER_DIR_IN], + DMA_FROM_DEVICE); + if (prot_count) + iser_dma_unmap_task_data(iser_task, + &iser_task->prot[ISER_DIR_IN], + DMA_FROM_DEVICE); + } + + if (iser_task->dir[ISER_DIR_OUT]) { + iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); + iser_dma_unmap_task_data(iser_task, + &iser_task->data[ISER_DIR_OUT], + DMA_TO_DEVICE); + if (prot_count) + iser_dma_unmap_task_data(iser_task, + &iser_task->prot[ISER_DIR_OUT], + DMA_TO_DEVICE); + } +} diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c new file mode 100644 index 000000000..379bc0dfc --- /dev/null +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/highmem.h> +#include <linux/scatterlist.h> + +#include "iscsi_iser.h" +static +int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *mem_reg); +static +int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *mem_reg); + +static const struct iser_reg_ops fastreg_ops = { + .alloc_reg_res = iser_alloc_fastreg_pool, + .free_reg_res = iser_free_fastreg_pool, + .reg_mem = iser_fast_reg_mr, + .unreg_mem = iser_unreg_mem_fastreg, + .reg_desc_get = iser_reg_desc_get_fr, + .reg_desc_put = iser_reg_desc_put_fr, +}; + +static const struct iser_reg_ops fmr_ops = { + .alloc_reg_res = iser_alloc_fmr_pool, + .free_reg_res = iser_free_fmr_pool, + .reg_mem = iser_fast_reg_fmr, + .unreg_mem = iser_unreg_mem_fmr, + .reg_desc_get = iser_reg_desc_get_fmr, + .reg_desc_put = iser_reg_desc_put_fmr, +}; + +void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + iser_err_comp(wc, "memreg"); +} + +int iser_assign_reg_ops(struct iser_device *device) +{ + struct ib_device *ib_dev = device->ib_device; + + /* Assign function handles - based on FMR support */ + if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr && + ib_dev->map_phys_fmr && ib_dev->unmap_fmr) { + iser_info("FMR supported, using FMR for registration\n"); + device->reg_ops = &fmr_ops; + } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + iser_info("FastReg supported, using FastReg for registration\n"); + device->reg_ops = &fastreg_ops; + device->remote_inv_sup = iser_always_reg; + } else { + iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); + return -1; + } + + return 0; +} + +struct iser_fr_desc * +iser_reg_desc_get_fr(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + unsigned long flags; + + spin_lock_irqsave(&fr_pool->lock, flags); + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + spin_unlock_irqrestore(&fr_pool->lock, flags); + + return desc; +} + +void 
+iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + unsigned long flags; + + spin_lock_irqsave(&fr_pool->lock, flags); + list_add(&desc->list, &fr_pool->list); + spin_unlock_irqrestore(&fr_pool->lock, flags); +} + +struct iser_fr_desc * +iser_reg_desc_get_fmr(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + + return list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); +} + +void +iser_reg_desc_put_fmr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) +{ +} + +static void iser_data_buf_dump(struct iser_data_buf *data, + struct ib_device *ibdev) +{ + struct scatterlist *sg; + int i; + + for_each_sg(data->sg, sg, data->dma_nents, i) + iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p " + "off:0x%x sz:0x%x dma_len:0x%x\n", + i, (unsigned long)ib_sg_dma_address(ibdev, sg), + sg_page(sg), sg->offset, + sg->length, ib_sg_dma_len(ibdev, sg)); +} + +static void iser_dump_page_vec(struct iser_page_vec *page_vec) +{ + int i; + + iser_err("page vec npages %d data length %lld\n", + page_vec->npages, page_vec->fake_mr.length); + for (i = 0; i < page_vec->npages; i++) + iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); +} + +int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir) +{ + struct ib_device *dev; + + iser_task->dir[iser_dir] = 1; + dev = iser_task->iser_conn->ib_conn.device->ib_device; + + data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir); + if (data->dma_nents == 0) { + iser_err("dma_map_sg failed!!!\n"); + return -EINVAL; + } + return 0; +} + +void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum dma_data_direction dir) +{ + struct ib_device *dev; + + dev = iser_task->iser_conn->ib_conn.device->ib_device; + ib_dma_unmap_sg(dev, data->sg, data->size, dir); +} + +static int +iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, + struct iser_mem_reg *reg) +{ + struct scatterlist *sg = mem->sg; + + reg->sge.lkey = device->pd->local_dma_lkey; + /* + * FIXME: rework the registration code path to differentiate + * rkey/lkey use cases + */ + + if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) + reg->rkey = device->pd->unsafe_global_rkey; + else + reg->rkey = 0; + reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]); + reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]); + + iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx," + " length=0x%x\n", reg->sge.lkey, reg->rkey, + reg->sge.addr, reg->sge.length); + + return 0; +} + +static int iser_set_page(struct ib_mr *mr, u64 addr) +{ + struct iser_page_vec *page_vec = + container_of(mr, struct iser_page_vec, fake_mr); + + page_vec->pages[page_vec->npages++] = addr; + + return 0; +} + +static +int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg) +{ + struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + struct iser_page_vec *page_vec = rsc->page_vec; + struct ib_fmr_pool *fmr_pool = rsc->fmr_pool; + struct ib_pool_fmr *fmr; + int ret, plen; + + page_vec->npages = 0; + page_vec->fake_mr.page_size = SIZE_4K; + plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg, + mem->dma_nents, NULL, iser_set_page); + if (unlikely(plen < mem->dma_nents)) { + iser_err("page vec too short to hold this 
SG\n"); + iser_data_buf_dump(mem, device->ib_device); + iser_dump_page_vec(page_vec); + return -EINVAL; + } + + fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages, + page_vec->npages, page_vec->pages[0]); + if (IS_ERR(fmr)) { + ret = PTR_ERR(fmr); + iser_err("ib_fmr_pool_map_phys failed: %d\n", ret); + return ret; + } + + reg->sge.lkey = fmr->fmr->lkey; + reg->rkey = fmr->fmr->rkey; + reg->sge.addr = page_vec->fake_mr.iova; + reg->sge.length = page_vec->fake_mr.length; + reg->mem_h = fmr; + + iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx," + " length=0x%x\n", reg->sge.lkey, reg->rkey, + reg->sge.addr, reg->sge.length); + + return 0; +} + +/** + * Unregister (previosuly registered using FMR) memory. + * If memory is non-FMR does nothing. + */ +void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir) +{ + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + int ret; + + if (!reg->mem_h) + return; + + iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h); + + ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h); + if (ret) + iser_err("ib_fmr_pool_unmap failed %d\n", ret); + + reg->mem_h = NULL; +} + +void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir) +{ + struct iser_device *device = iser_task->iser_conn->ib_conn.device; + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + + if (!reg->mem_h) + return; + + device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, + reg->mem_h); + reg->mem_h = NULL; +} + +static void +iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs, + struct ib_sig_domain *domain) +{ + domain->sig_type = IB_SIG_TYPE_T10_DIF; + domain->sig.dif.pi_interval = scsi_prot_interval(sc); + domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request); + /* + * At the moment we hard code those, but in the future + * we will take them from sc. + */ + domain->sig.dif.apptag_check_mask = 0xffff; + domain->sig.dif.app_escape = true; + domain->sig.dif.ref_escape = true; + if (sc->prot_flags & SCSI_PROT_REF_INCREMENT) + domain->sig.dif.ref_remap = true; +}; + +static int +iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) +{ + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_STRIP: + sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire); + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem); + sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ? + IB_T10DIF_CSUM : IB_T10DIF_CRC; + break; + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire); + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem); + sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ? 
+ IB_T10DIF_CSUM : IB_T10DIF_CRC; + break; + default: + iser_err("Unsupported PI operation %d\n", + scsi_get_prot_op(sc)); + return -EINVAL; + } + + return 0; +} + +static inline void +iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) +{ + *mask = 0; + if (sc->prot_flags & SCSI_PROT_REF_CHECK) + *mask |= IB_SIG_CHECK_REFTAG; + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) + *mask |= IB_SIG_CHECK_GUARD; +} + +static inline void +iser_inv_rkey(struct ib_send_wr *inv_wr, + struct ib_mr *mr, + struct ib_cqe *cqe) +{ + inv_wr->opcode = IB_WR_LOCAL_INV; + inv_wr->wr_cqe = cqe; + inv_wr->ex.invalidate_rkey = mr->rkey; + inv_wr->send_flags = 0; + inv_wr->num_sge = 0; +} + +static int +iser_reg_sig_mr(struct iscsi_iser_task *iser_task, + struct iser_pi_context *pi_ctx, + struct iser_mem_reg *data_reg, + struct iser_mem_reg *prot_reg, + struct iser_mem_reg *sig_reg) +{ + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs; + struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; + struct ib_sig_handover_wr *wr; + struct ib_mr *mr = pi_ctx->sig_mr; + int ret; + + memset(sig_attrs, 0, sizeof(*sig_attrs)); + ret = iser_set_sig_attrs(iser_task->sc, sig_attrs); + if (ret) + goto err; + + iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); + + if (pi_ctx->sig_mr_valid) + iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); + + ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); + + wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr, + wr); + wr->wr.opcode = IB_WR_REG_SIG_MR; + wr->wr.wr_cqe = cqe; + wr->wr.sg_list = &data_reg->sge; + wr->wr.num_sge = 1; + wr->wr.send_flags = 0; + wr->sig_attrs = sig_attrs; + wr->sig_mr = mr; + if (scsi_prot_sg_count(iser_task->sc)) + wr->prot = &prot_reg->sge; + else + wr->prot = NULL; + wr->access_flags = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + pi_ctx->sig_mr_valid = 1; + + sig_reg->sge.lkey = mr->lkey; + sig_reg->rkey = mr->rkey; + sig_reg->sge.addr = 0; + sig_reg->sge.length = scsi_transfer_length(iser_task->sc); + + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n", + sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, + sig_reg->sge.length); +err: + return ret; +} + +static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg) +{ + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; + struct ib_mr *mr = rsc->mr; + struct ib_reg_wr *wr; + int n; + + if (rsc->mr_valid) + iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); + + ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); + + n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K); + if (unlikely(n != mem->dma_nents)) { + iser_err("failed to map sg (%d/%d)\n", + n, mem->dma_nents); + return n < 0 ? 
n : -EINVAL; + } + + wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr); + wr->wr.opcode = IB_WR_REG_MR; + wr->wr.wr_cqe = cqe; + wr->wr.send_flags = 0; + wr->wr.num_sge = 0; + wr->mr = mr; + wr->key = mr->rkey; + wr->access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + rsc->mr_valid = 1; + + reg->sge.lkey = mr->lkey; + reg->rkey = mr->rkey; + reg->sge.addr = mr->iova; + reg->sge.length = mr->length; + + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n", + reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length); + + return 0; +} + +static int +iser_reg_prot_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, + bool use_dma_key, + struct iser_mem_reg *reg) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + if (use_dma_key) + return iser_reg_dma(device, mem, reg); + + return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); +} + +static int +iser_reg_data_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, + bool use_dma_key, + struct iser_mem_reg *reg) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + if (use_dma_key) + return iser_reg_dma(device, mem, reg); + + return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); +} + +int iser_reg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir, + bool all_imm) +{ + struct ib_conn *ib_conn = &task->iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + struct iser_data_buf *mem = &task->data[dir]; + struct iser_mem_reg *reg = &task->rdma_reg[dir]; + struct iser_mem_reg *data_reg; + struct iser_fr_desc *desc = NULL; + bool use_dma_key; + int err; + + use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL; + + if (!use_dma_key) { + desc = device->reg_ops->reg_desc_get(ib_conn); + reg->mem_h = desc; + } + + if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) + data_reg = reg; + else + data_reg = &task->desc.data_reg; + + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); + if (unlikely(err)) + goto err_reg; + + if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + struct iser_mem_reg *prot_reg = &task->desc.prot_reg; + + if (scsi_prot_sg_count(task->sc)) { + mem = &task->prot[dir]; + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); + if (unlikely(err)) + goto err_reg; + } + + err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg, + prot_reg, reg); + if (unlikely(err)) + goto err_reg; + + desc->pi_ctx->sig_protected = 1; + } + + return 0; + +err_reg: + if (desc) + device->reg_ops->reg_desc_put(ib_conn, desc); + + return err; +} + +void iser_unreg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + device->reg_ops->unreg_mem(task, dir); +} diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c new file mode 100644 index 000000000..bee8c0b1d --- /dev/null +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -0,0 +1,1174 @@ +/* + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/delay.h> + +#include "iscsi_iser.h" + +#define ISCSI_ISER_MAX_CONN 8 +#define ISER_MAX_RX_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_TX_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_CQ_LEN (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \ + ISCSI_ISER_MAX_CONN) + +static void iser_qp_event_callback(struct ib_event *cause, void *context) +{ + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); +} + +static void iser_event_handler(struct ib_event_handler *handler, + struct ib_event *event) +{ + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); +} + +/** + * iser_create_device_ib_res - creates Protection Domain (PD), Completion + * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with + * the adapator. + * + * returns 0 on success, -1 on failure + */ +static int iser_create_device_ib_res(struct iser_device *device) +{ + struct ib_device *ib_dev = device->ib_device; + int ret, i, max_cqe; + + ret = iser_assign_reg_ops(device); + if (ret) + return ret; + + device->comps_used = min_t(int, num_online_cpus(), + ib_dev->num_comp_vectors); + + device->comps = kcalloc(device->comps_used, sizeof(*device->comps), + GFP_KERNEL); + if (!device->comps) + goto comps_err; + + max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe); + + iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n", + device->comps_used, ib_dev->name, + ib_dev->num_comp_vectors, max_cqe); + + device->pd = ib_alloc_pd(ib_dev, + iser_always_reg ? 
0 : IB_PD_UNSAFE_GLOBAL_RKEY); + if (IS_ERR(device->pd)) + goto pd_err; + + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i, + IB_POLL_SOFTIRQ); + if (IS_ERR(comp->cq)) { + comp->cq = NULL; + goto cq_err; + } + } + + INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev, + iser_event_handler); + ib_register_event_handler(&device->event_handler); + return 0; + +cq_err: + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + if (comp->cq) + ib_free_cq(comp->cq); + } + ib_dealloc_pd(device->pd); +pd_err: + kfree(device->comps); +comps_err: + iser_err("failed to allocate an IB resource\n"); + return -1; +} + +/** + * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR, + * CQ and PD created with the device associated with the adapator. + */ +static void iser_free_device_ib_res(struct iser_device *device) +{ + int i; + + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + ib_free_cq(comp->cq); + comp->cq = NULL; + } + + ib_unregister_event_handler(&device->event_handler); + ib_dealloc_pd(device->pd); + + kfree(device->comps); + device->comps = NULL; + device->pd = NULL; +} + +/** + * iser_alloc_fmr_pool - Creates FMR pool and page_vector + * + * returns 0 on success, or errno code on failure + */ +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) +{ + struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_page_vec *page_vec; + struct iser_fr_desc *desc; + struct ib_fmr_pool *fmr_pool; + struct ib_fmr_pool_param params; + int ret; + + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size), + GFP_KERNEL); + if (!page_vec) { + ret = -ENOMEM; + goto err_frpl; + } + + page_vec->pages = (u64 *)(page_vec + 1); + + params.page_shift = SHIFT_4K; + params.max_pages_per_fmr = size; + /* make the pool size twice the max number of SCSI commands * + * the ML is expected to queue, watermark for unmap at 50% */ + params.pool_size = cmds_max * 2; + params.dirty_watermark = cmds_max; + params.cache = 0; + params.flush_function = NULL; + params.access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); + + fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); + if (IS_ERR(fmr_pool)) { + ret = PTR_ERR(fmr_pool); + iser_err("FMR allocation failed, err %d\n", ret); + goto err_fmr; + } + + desc->rsc.page_vec = page_vec; + desc->rsc.fmr_pool = fmr_pool; + list_add(&desc->list, &fr_pool->list); + + return 0; + +err_fmr: + kfree(page_vec); +err_frpl: + kfree(desc); + + return ret; +} + +/** + * iser_free_fmr_pool - releases the FMR pool and page vec + */ +void iser_free_fmr_pool(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + + iser_info("freeing conn %p fmr pool %p\n", + ib_conn, desc->rsc.fmr_pool); + + ib_destroy_fmr_pool(desc->rsc.fmr_pool); + kfree(desc->rsc.page_vec); + kfree(desc); +} + +static int +iser_alloc_reg_res(struct iser_device *device, + struct ib_pd *pd, + struct iser_reg_resources *res, + unsigned int size) +{ + struct ib_device *ib_dev = device->ib_device; + enum ib_mr_type mr_type; + int ret; + + 
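	/*
	 * Note: IB_MR_TYPE_SG_GAPS (preferred below when the device
	 * advertises IB_DEVICE_SG_GAPS_REG) can register a scatterlist
	 * whose elements begin and end at arbitrary offsets; a plain
	 * IB_MR_TYPE_MEM_REG MR needs a gap-free, page-aligned layout,
	 * which is why iser_calc_scsi_params() reserves an extra page
	 * for a misaligned head on such devices.
	 */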
if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) + mr_type = IB_MR_TYPE_SG_GAPS; + else + mr_type = IB_MR_TYPE_MEM_REG; + + res->mr = ib_alloc_mr(pd, mr_type, size); + if (IS_ERR(res->mr)) { + ret = PTR_ERR(res->mr); + iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); + return ret; + } + res->mr_valid = 0; + + return 0; +} + +static void +iser_free_reg_res(struct iser_reg_resources *rsc) +{ + ib_dereg_mr(rsc->mr); +} + +static int +iser_alloc_pi_ctx(struct iser_device *device, + struct ib_pd *pd, + struct iser_fr_desc *desc, + unsigned int size) +{ + struct iser_pi_context *pi_ctx = NULL; + int ret; + + desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); + if (!desc->pi_ctx) + return -ENOMEM; + + pi_ctx = desc->pi_ctx; + + ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size); + if (ret) { + iser_err("failed to allocate reg_resources\n"); + goto alloc_reg_res_err; + } + + pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); + if (IS_ERR(pi_ctx->sig_mr)) { + ret = PTR_ERR(pi_ctx->sig_mr); + goto sig_mr_failure; + } + pi_ctx->sig_mr_valid = 0; + desc->pi_ctx->sig_protected = 0; + + return 0; + +sig_mr_failure: + iser_free_reg_res(&pi_ctx->rsc); +alloc_reg_res_err: + kfree(desc->pi_ctx); + + return ret; +} + +static void +iser_free_pi_ctx(struct iser_pi_context *pi_ctx) +{ + iser_free_reg_res(&pi_ctx->rsc); + ib_dereg_mr(pi_ctx->sig_mr); + kfree(pi_ctx); +} + +static struct iser_fr_desc * +iser_create_fastreg_desc(struct iser_device *device, + struct ib_pd *pd, + bool pi_enable, + unsigned int size) +{ + struct iser_fr_desc *desc; + int ret; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + ret = iser_alloc_reg_res(device, pd, &desc->rsc, size); + if (ret) + goto reg_res_alloc_failure; + + if (pi_enable) { + ret = iser_alloc_pi_ctx(device, pd, desc, size); + if (ret) + goto pi_ctx_alloc_failure; + } + + return desc; + +pi_ctx_alloc_failure: + iser_free_reg_res(&desc->rsc); +reg_res_alloc_failure: + kfree(desc); + + return ERR_PTR(ret); +} + +/** + * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors + * for fast registration work requests. 
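 * Note: the pool holds one descriptor per outstanding command
 * (cmds_max); each descriptor owns a fast-registration MR and, when the
 * connection was created with pi_support, an additional
 * protection-information context carrying its own registration MR and
 * signature MR.
 *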
+ * returns 0 on success, or errno code on failure + */ +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) +{ + struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + int i, ret; + + INIT_LIST_HEAD(&fr_pool->list); + INIT_LIST_HEAD(&fr_pool->all_list); + spin_lock_init(&fr_pool->lock); + fr_pool->size = 0; + for (i = 0; i < cmds_max; i++) { + desc = iser_create_fastreg_desc(device, device->pd, + ib_conn->pi_support, size); + if (IS_ERR(desc)) { + ret = PTR_ERR(desc); + goto err; + } + + list_add_tail(&desc->list, &fr_pool->list); + list_add_tail(&desc->all_list, &fr_pool->all_list); + fr_pool->size++; + } + + return 0; + +err: + iser_free_fastreg_pool(ib_conn); + return ret; +} + +/** + * iser_free_fastreg_pool - releases the pool of fast_reg descriptors + */ +void iser_free_fastreg_pool(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc, *tmp; + int i = 0; + + if (list_empty(&fr_pool->all_list)) + return; + + iser_info("freeing conn %p fr pool\n", ib_conn); + + list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { + list_del(&desc->all_list); + iser_free_reg_res(&desc->rsc); + if (desc->pi_ctx) + iser_free_pi_ctx(desc->pi_ctx); + kfree(desc); + ++i; + } + + if (i < fr_pool->size) + iser_warn("pool still has %d regions registered\n", + fr_pool->size - i); +} + +/** + * iser_create_ib_conn_res - Queue-Pair (QP) + * + * returns 0 on success, -1 on failure + */ +static int iser_create_ib_conn_res(struct ib_conn *ib_conn) +{ + struct iser_conn *iser_conn = to_iser_conn(ib_conn); + struct iser_device *device; + struct ib_device *ib_dev; + struct ib_qp_init_attr init_attr; + int ret = -ENOMEM; + int index, min_index = 0; + + BUG_ON(ib_conn->device == NULL); + + device = ib_conn->device; + ib_dev = device->ib_device; + + memset(&init_attr, 0, sizeof init_attr); + + mutex_lock(&ig.connlist_mutex); + /* select the CQ with the minimal number of usages */ + for (index = 0; index < device->comps_used; index++) { + if (device->comps[index].active_qps < + device->comps[min_index].active_qps) + min_index = index; + } + ib_conn->comp = &device->comps[min_index]; + ib_conn->comp->active_qps++; + mutex_unlock(&ig.connlist_mutex); + iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); + + init_attr.event_handler = iser_qp_event_callback; + init_attr.qp_context = (void *)ib_conn; + init_attr.send_cq = ib_conn->comp->cq; + init_attr.recv_cq = ib_conn->comp->cq; + init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; + init_attr.cap.max_send_sge = 2; + init_attr.cap.max_recv_sge = 1; + init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_attr.qp_type = IB_QPT_RC; + if (ib_conn->pi_support) { + init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1; + init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS); + } else { + if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) { + init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS); + } else { + init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr); + iser_dbg("device %s supports max_send_wr %d\n", + device->ib_device->name, ib_dev->attrs.max_qp_wr); + } + } + + ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); + if (ret) + goto out_err; + + 
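	/*
	 * Worked example for the send-queue sizing above, using
	 * illustrative numbers: if ISER_QP_MAX_REQ_DTOS were 512 but the
	 * device reported attrs.max_qp_wr = 300, the device limit wins,
	 * so max_send_wr = 300 and the connection's command window
	 * shrinks to ISER_GET_MAX_XMIT_CMDS(300).
	 */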
ib_conn->qp = ib_conn->cma_id->qp;
+	iser_info("setting conn %p cma_id %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->cma_id->qp);
+	return ret;
+
+out_err:
+	mutex_lock(&ig.connlist_mutex);
+	ib_conn->comp->active_qps--;
+	mutex_unlock(&ig.connlist_mutex);
+	iser_err("unable to alloc mem or create resource, err %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * Based on the resolved device node GUID, see if a device has already
+ * been allocated for this IB device. If not, create one.
+ */
+static
+struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
+{
+	struct iser_device *device;
+
+	mutex_lock(&ig.device_list_mutex);
+
+	list_for_each_entry(device, &ig.device_list, ig_list)
+		/* find if there's a match using the node GUID */
+		if (device->ib_device->node_guid == cma_id->device->node_guid)
+			goto inc_refcnt;
+
+	device = kzalloc(sizeof *device, GFP_KERNEL);
+	if (device == NULL)
+		goto out;
+
+	/* attach the resolved IB device to the new entry */
+	device->ib_device = cma_id->device;
+	/* init the device and link it into ig device list */
+	if (iser_create_device_ib_res(device)) {
+		kfree(device);
+		device = NULL;
+		goto out;
+	}
+	list_add(&device->ig_list, &ig.device_list);
+
+inc_refcnt:
+	device->refcount++;
+out:
+	mutex_unlock(&ig.device_list_mutex);
+	return device;
+}
+
+/* if there's no demand for this device, release it */
+static void iser_device_try_release(struct iser_device *device)
+{
+	mutex_lock(&ig.device_list_mutex);
+	device->refcount--;
+	iser_info("device %p refcount %d\n", device, device->refcount);
+	if (!device->refcount) {
+		iser_free_device_ib_res(device);
+		list_del(&device->ig_list);
+		kfree(device);
+	}
+	mutex_unlock(&ig.device_list_mutex);
+}
+
+/**
+ * Called with state mutex held
+ **/
+static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
+				     enum iser_conn_state comp,
+				     enum iser_conn_state exch)
+{
+	int ret;
+
+	ret = (iser_conn->state == comp);
+	if (ret)
+		iser_conn->state = exch;
+
+	return ret;
+}
+
+void iser_release_work(struct work_struct *work)
+{
+	struct iser_conn *iser_conn;
+
+	iser_conn = container_of(work, struct iser_conn, release_work);
+
+	/* Wait for conn_stop to complete */
+	wait_for_completion(&iser_conn->stop_completion);
+	/* Wait for IB resources cleanup to complete */
+	wait_for_completion(&iser_conn->ib_completion);
+
+	mutex_lock(&iser_conn->state_mutex);
+	iser_conn->state = ISER_CONN_DOWN;
+	mutex_unlock(&iser_conn->state_mutex);
+
+	iser_conn_release(iser_conn);
+}
+
+/**
+ * iser_free_ib_conn_res - release IB related resources
+ * @iser_conn: iser connection struct
+ * @destroy: indicator if we need to try to release the
+ *           iser device and memory regions pool (only iscsi
+ *           shutdown and DEVICE_REMOVAL will use this).
+ *
+ * This routine is called with the iser state mutex held
+ * so the cm_id removal is done elsewhere. It is safe to
+ * be invoked multiple times.
+ */ +static void iser_free_ib_conn_res(struct iser_conn *iser_conn, + bool destroy) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + iser_info("freeing conn %p cma_id %p qp %p\n", + iser_conn, ib_conn->cma_id, ib_conn->qp); + + if (ib_conn->qp != NULL) { + mutex_lock(&ig.connlist_mutex); + ib_conn->comp->active_qps--; + mutex_unlock(&ig.connlist_mutex); + rdma_destroy_qp(ib_conn->cma_id); + ib_conn->qp = NULL; + } + + if (destroy) { + if (iser_conn->rx_descs) + iser_free_rx_descriptors(iser_conn); + + if (device != NULL) { + iser_device_try_release(device); + ib_conn->device = NULL; + } + } +} + +/** + * Frees all conn objects and deallocs conn descriptor + */ +void iser_conn_release(struct iser_conn *iser_conn) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + + mutex_lock(&ig.connlist_mutex); + list_del(&iser_conn->conn_list); + mutex_unlock(&ig.connlist_mutex); + + mutex_lock(&iser_conn->state_mutex); + /* In case we endup here without ep_disconnect being invoked. */ + if (iser_conn->state != ISER_CONN_DOWN) { + iser_warn("iser conn %p state %d, expected state down.\n", + iser_conn, iser_conn->state); + iscsi_destroy_endpoint(iser_conn->ep); + iser_conn->state = ISER_CONN_DOWN; + } + /* + * In case we never got to bind stage, we still need to + * release IB resources (which is safe to call more than once). + */ + iser_free_ib_conn_res(iser_conn, true); + mutex_unlock(&iser_conn->state_mutex); + + if (ib_conn->cma_id != NULL) { + rdma_destroy_id(ib_conn->cma_id); + ib_conn->cma_id = NULL; + } + + kfree(iser_conn); +} + +/** + * triggers start of the disconnect procedures and wait for them to be done + * Called with state mutex held + */ +int iser_conn_terminate(struct iser_conn *iser_conn) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + int err = 0; + + /* terminate the iser conn only if the conn state is UP */ + if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, + ISER_CONN_TERMINATING)) + return 0; + + iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state); + + /* suspend queuing of new iscsi commands */ + if (iser_conn->iscsi_conn) + iscsi_suspend_queue(iser_conn->iscsi_conn); + + /* + * In case we didn't already clean up the cma_id (peer initiated + * a disconnection), we need to Cause the CMA to change the QP + * state to ERROR. + */ + if (ib_conn->cma_id) { + err = rdma_disconnect(ib_conn->cma_id); + if (err) + iser_err("Failed to disconnect, conn: 0x%p err %d\n", + iser_conn, err); + + /* block until all flush errors are consumed */ + ib_drain_sq(ib_conn->qp); + } + + return 1; +} + +/** + * Called with state mutex held + **/ +static void iser_connect_error(struct rdma_cm_id *cma_id) +{ + struct iser_conn *iser_conn; + + iser_conn = (struct iser_conn *)cma_id->context; + iser_conn->state = ISER_CONN_TERMINATING; +} + +static void +iser_calc_scsi_params(struct iser_conn *iser_conn, + unsigned int max_sectors) +{ + struct iser_device *device = iser_conn->ib_conn.device; + struct ib_device_attr *attr = &device->ib_device->attrs; + unsigned short sg_tablesize, sup_sg_tablesize; + unsigned short reserved_mr_pages; + + /* + * FRs without SG_GAPS or FMRs can only map up to a (device) page per + * entry, but if the first entry is misaligned we'll end up using two + * entries (head and tail) for a single page worth data, so one + * additional entry is required. 
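 *
 * Worked example (illustrative numbers): with max_sectors = 1024,
 * i.e. 512 KiB per command, and 4 KiB pages:
 *   sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128
 * On a FastReg device without SG_GAPS, reserved_mr_pages = 1 for the
 * possibly misaligned head, so pages_per_mr = 128 + 1 = 129 (assuming
 * the device supports at least that many pages per MR).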
+ */ + if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) && + (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)) + reserved_mr_pages = 0; + else + reserved_mr_pages = 1; + + sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); + if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) + sup_sg_tablesize = + min_t( + uint, ISCSI_ISER_MAX_SG_TABLESIZE, + attr->max_fast_reg_page_list_len - reserved_mr_pages); + else + sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE; + + iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); + iser_conn->pages_per_mr = + iser_conn->scsi_sg_tablesize + reserved_mr_pages; +} + +/** + * Called with state mutex held + **/ +static void iser_addr_handler(struct rdma_cm_id *cma_id) +{ + struct iser_device *device; + struct iser_conn *iser_conn; + struct ib_conn *ib_conn; + int ret; + + iser_conn = (struct iser_conn *)cma_id->context; + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + ib_conn = &iser_conn->ib_conn; + device = iser_device_find_by_ib_device(cma_id); + if (!device) { + iser_err("device lookup/creation failed\n"); + iser_connect_error(cma_id); + return; + } + + ib_conn->device = device; + + /* connection T10-PI support */ + if (iser_pi_enable) { + if (!(device->ib_device->attrs.device_cap_flags & + IB_DEVICE_SIGNATURE_HANDOVER)) { + iser_warn("T10-PI requested but not supported on %s, " + "continue without T10-PI\n", + ib_conn->device->ib_device->name); + ib_conn->pi_support = false; + } else { + ib_conn->pi_support = true; + } + } + + iser_calc_scsi_params(iser_conn, iser_max_sectors); + + ret = rdma_resolve_route(cma_id, 1000); + if (ret) { + iser_err("resolve route failed: %d\n", ret); + iser_connect_error(cma_id); + return; + } +} + +/** + * Called with state mutex held + **/ +static void iser_route_handler(struct rdma_cm_id *cma_id) +{ + struct rdma_conn_param conn_param; + int ret; + struct iser_cm_hdr req_hdr; + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + ret = iser_create_ib_conn_res(ib_conn); + if (ret) + goto failure; + + memset(&conn_param, 0, sizeof conn_param); + conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom; + conn_param.initiator_depth = 1; + conn_param.retry_count = 7; + conn_param.rnr_retry_count = 6; + + memset(&req_hdr, 0, sizeof(req_hdr)); + req_hdr.flags = ISER_ZBVA_NOT_SUP; + if (!device->remote_inv_sup) + req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP; + conn_param.private_data = (void *)&req_hdr; + conn_param.private_data_len = sizeof(struct iser_cm_hdr); + + ret = rdma_connect(cma_id, &conn_param); + if (ret) { + iser_err("failure connecting: %d\n", ret); + goto failure; + } + + return; +failure: + iser_connect_error(cma_id); +} + +static void iser_connected_handler(struct rdma_cm_id *cma_id, + const void *private_data) +{ + struct iser_conn *iser_conn; + struct ib_qp_attr attr; + struct ib_qp_init_attr init_attr; + + iser_conn = (struct iser_conn *)cma_id->context; + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); + iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); + + if (private_data) { + u8 flags = *(u8 *)private_data; + + iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP); + } + + iser_info("conn %p: negotiated %s invalidation\n", + iser_conn, 
+
+	iser_conn->state = ISER_CONN_UP;
+	complete(&iser_conn->up_completion);
+}
+
+static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+{
+	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+
+	if (iser_conn_terminate(iser_conn)) {
+		if (iser_conn->iscsi_conn)
+			iscsi_conn_failure(iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+		else
+			iser_err("iscsi_iser connection isn't bound\n");
+	}
+}
+
+static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
+				 bool destroy)
+{
+	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+
+	/*
+	 * We are not guaranteed to have visited disconnected_handler
+	 * by now, so call it here to be sure we handle the CM DREP
+	 * and flush errors.
+	 */
+	iser_disconnected_handler(cma_id);
+	iser_free_ib_conn_res(iser_conn, destroy);
+	complete(&iser_conn->ib_completion);
+}
+
+static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+	struct iser_conn *iser_conn;
+	int ret = 0;
+
+	iser_conn = (struct iser_conn *)cma_id->context;
+	iser_info("%s (%d): status %d conn %p id %p\n",
+		  rdma_event_msg(event->event), event->event,
+		  event->status, cma_id->context, cma_id);
+
+	mutex_lock(&iser_conn->state_mutex);
+	switch (event->event) {
+	case RDMA_CM_EVENT_ADDR_RESOLVED:
+		iser_addr_handler(cma_id);
+		break;
+	case RDMA_CM_EVENT_ROUTE_RESOLVED:
+		iser_route_handler(cma_id);
+		break;
+	case RDMA_CM_EVENT_ESTABLISHED:
+		iser_connected_handler(cma_id, event->param.conn.private_data);
+		break;
+	case RDMA_CM_EVENT_REJECTED:
+		iser_info("Connection rejected: %s\n",
+			  rdma_reject_msg(cma_id, event->status));
+		/* FALLTHROUGH */
+	case RDMA_CM_EVENT_ADDR_ERROR:
+	case RDMA_CM_EVENT_ROUTE_ERROR:
+	case RDMA_CM_EVENT_CONNECT_ERROR:
+	case RDMA_CM_EVENT_UNREACHABLE:
+		iser_connect_error(cma_id);
+		break;
+	case RDMA_CM_EVENT_DISCONNECTED:
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+		iser_cleanup_handler(cma_id, false);
+		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		/*
+		 * We *must* destroy the device as we cannot rely
+		 * on iscsid to be around to initiate error handling.
+		 * Also, if we are not in state DOWN, implicitly destroy
+		 * the cma_id.
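+		 * Returning a non-zero value from this handler instructs
+		 * the RDMA CM to destroy the cma_id on our behalf, which
+		 * is why we only clear our reference and set ret = 1
+		 * below rather than calling rdma_destroy_id() ourselves.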
+		 */
+		iser_cleanup_handler(cma_id, true);
+		if (iser_conn->state != ISER_CONN_DOWN) {
+			iser_conn->ib_conn.cma_id = NULL;
+			ret = 1;
+		}
+		break;
+	default:
+		iser_err("Unexpected RDMA CM event: %s (%d)\n",
+			 rdma_event_msg(event->event), event->event);
+		break;
+	}
+	mutex_unlock(&iser_conn->state_mutex);
+
+	return ret;
+}
+
+void iser_conn_init(struct iser_conn *iser_conn)
+{
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+
+	iser_conn->state = ISER_CONN_INIT;
+	init_completion(&iser_conn->stop_completion);
+	init_completion(&iser_conn->ib_completion);
+	init_completion(&iser_conn->up_completion);
+	INIT_LIST_HEAD(&iser_conn->conn_list);
+	mutex_init(&iser_conn->state_mutex);
+
+	ib_conn->post_recv_buf_count = 0;
+	ib_conn->reg_cqe.done = iser_reg_comp;
+}
+
+/**
+ * Starts the process of connecting to the target and
+ * sleeps until the connection is established or rejected.
+ */
+int iser_connect(struct iser_conn *iser_conn,
+		 struct sockaddr *src_addr,
+		 struct sockaddr *dst_addr,
+		 int non_blocking)
+{
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+	int err = 0;
+
+	mutex_lock(&iser_conn->state_mutex);
+
+	sprintf(iser_conn->name, "%pISp", dst_addr);
+
+	iser_info("connecting to: %s\n", iser_conn->name);
+
+	/* the device is known only --after-- address resolution */
+	ib_conn->device = NULL;
+
+	iser_conn->state = ISER_CONN_PENDING;
+
+	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
+					 (void *)iser_conn,
+					 RDMA_PS_TCP, IB_QPT_RC);
+	if (IS_ERR(ib_conn->cma_id)) {
+		err = PTR_ERR(ib_conn->cma_id);
+		iser_err("rdma_create_id failed: %d\n", err);
+		goto id_failure;
+	}
+
+	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
+	if (err) {
+		iser_err("rdma_resolve_addr failed: %d\n", err);
+		goto addr_failure;
+	}
+
+	if (!non_blocking) {
+		wait_for_completion_interruptible(&iser_conn->up_completion);
+
+		if (iser_conn->state != ISER_CONN_UP) {
+			err = -EIO;
+			goto connect_failure;
+		}
+	}
+	mutex_unlock(&iser_conn->state_mutex);
+
+	mutex_lock(&ig.connlist_mutex);
+	list_add(&iser_conn->conn_list, &ig.connlist);
+	mutex_unlock(&ig.connlist_mutex);
+	return 0;
+
+id_failure:
+	ib_conn->cma_id = NULL;
+addr_failure:
+	iser_conn->state = ISER_CONN_DOWN;
+connect_failure:
+	mutex_unlock(&iser_conn->state_mutex);
+	iser_conn_release(iser_conn);
+	return err;
+}
+
+int iser_post_recvl(struct iser_conn *iser_conn)
+{
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+	struct iser_login_desc *desc = &iser_conn->login_desc;
+	struct ib_recv_wr wr;
+	int ib_ret;
+
+	desc->sge.addr = desc->rsp_dma;
+	desc->sge.length = ISER_RX_LOGIN_SIZE;
+	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
+
+	desc->cqe.done = iser_login_rsp;
+	wr.wr_cqe = &desc->cqe;
+	wr.sg_list = &desc->sge;
+	wr.num_sge = 1;
+	wr.next = NULL;
+
+	ib_conn->post_recv_buf_count++;
+	ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+	if (ib_ret) {
+		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+		ib_conn->post_recv_buf_count--;
+	}
+
+	return ib_ret;
+}
+
+int iser_post_recvm(struct iser_conn *iser_conn, int count)
+{
+	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+	unsigned int my_rx_head = iser_conn->rx_desc_head;
+	struct iser_rx_desc *rx_desc;
+	struct ib_recv_wr *wr;
+	int i, ib_ret;
+
+	for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
+		rx_desc = &iser_conn->rx_descs[my_rx_head];
+		rx_desc->cqe.done = iser_task_rsp;
+		wr->wr_cqe = &rx_desc->cqe;
+		wr->sg_list = &rx_desc->rx_sg;
+		wr->num_sge = 1;
+		wr->next = wr + 1;
+		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
+	}
+
+	wr--;
+	wr->next = NULL; /* mark end of work requests list */
+
+	ib_conn->post_recv_buf_count += count;
+	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
+	if (ib_ret) {
+		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+		ib_conn->post_recv_buf_count -= count;
+	} else {
+		iser_conn->rx_desc_head = my_rx_head;
+	}
+
+	return ib_ret;
+}
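+
+/*
+ * Note on the receive ring in iser_post_recvm() above:
+ * qp_max_recv_dtos_mask is the ring size minus one, and the ring size
+ * is a power of two, so "(head + 1) & mask" is a division-free
+ * equivalent of "(head + 1) % ring_size". A minimal sketch of the same
+ * wrap-around logic, with illustrative values not taken from this
+ * driver:
+ *
+ *	unsigned int size = 8, mask = size - 1;	// mask = 0x7
+ *	unsigned int head = 7;
+ *	head = (head + 1) & mask;		// wraps around to 0
+ */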
+
+
+/**
+ * iser_post_send - Initiate a Send DTO operation
+ *
+ * Return: 0 on success, the error code of ib_post_send() otherwise.
+ */
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
+		   bool signal)
+{
+	struct ib_send_wr *wr = iser_tx_next_wr(tx_desc);
+	int ib_ret;
+
+	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+				      tx_desc->dma_addr, ISER_HEADERS_LEN,
+				      DMA_TO_DEVICE);
+
+	wr->next = NULL;
+	wr->wr_cqe = &tx_desc->cqe;
+	wr->sg_list = tx_desc->tx_sg;
+	wr->num_sge = tx_desc->num_sge;
+	wr->opcode = IB_WR_SEND;
+	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
+
+	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL);
+	if (ib_ret)
+		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
+			 ib_ret, wr->opcode);
+
+	return ib_ret;
+}
+
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+			     enum iser_data_dir cmd_dir, sector_t *sector)
+{
+	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+	struct iser_fr_desc *desc = reg->mem_h;
+	unsigned long sector_size = iser_task->sc->device->sector_size;
+	struct ib_mr_status mr_status;
+	int ret;
+
+	if (desc && desc->pi_ctx->sig_protected) {
+		desc->pi_ctx->sig_protected = 0;
+		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
+					 IB_MR_CHECK_SIG_STATUS, &mr_status);
+		if (ret) {
+			pr_err("ib_check_mr_status failed, ret %d\n", ret);
+			/* Not a lot we can do, return ambiguous guard error */
+			*sector = 0;
+			return 0x1;
+		}
+
+		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+			sector_t sector_off = mr_status.sig_err.sig_err_offset;
+
+			sector_div(sector_off, sector_size + 8);
+			*sector = scsi_get_lba(iser_task->sc) + sector_off;
+
+			pr_err("PI error found type %d at sector %llx "
+			       "expected %x vs actual %x\n",
+			       mr_status.sig_err.err_type,
+			       (unsigned long long)*sector,
+			       mr_status.sig_err.expected,
+			       mr_status.sig_err.actual);
+
+			switch (mr_status.sig_err.err_type) {
+			case IB_SIG_BAD_GUARD:
+				return 0x1;
+			case IB_SIG_BAD_REFTAG:
+				return 0x3;
+			case IB_SIG_BAD_APPTAG:
+				return 0x2;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void iser_err_comp(struct ib_wc *wc, const char *type)
+{
+	if (wc->status != IB_WC_WR_FLUSH_ERR) {
+		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
+
+		iser_err("%s failure: %s (%d) vend_err %#x\n", type,
+			 ib_wc_status_msg(wc->status), wc->status,
+			 wc->vendor_err);
+
+		if (iser_conn->iscsi_conn)
+			iscsi_conn_failure(iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+	} else {
+		iser_dbg("%s failure: %s (%d)\n", type,
+			 ib_wc_status_msg(wc->status), wc->status);
+	}
+}
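+
+/*
+ * Note on the sector math in iser_check_task_pi_status() above: with
+ * T10-PI, each sector_size bytes of data are interleaved with an
+ * 8-byte DIF tuple, so the HCA reports sig_err_offset within a
+ * (sector_size + 8)-byte stream. For 512-byte sectors, an offset of
+ * 2600 maps to 2600 / 520 = sector 5 relative to the command's LBA.
+ * The 0x1/0x2/0x3 return codes are the T10 guard/app-tag/ref-tag
+ * check-failure ASCQ values (ASC 0x10) that the caller folds into
+ * the SCSI sense data.
+ */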