Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/Makefile                       |    8
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig                  |   50
-rw-r--r--  drivers/infiniband/ulp/ipoib/Makefile                 |   13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h                  |  843
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c               | 1661
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c          |  236
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c               |  251
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c               | 1311
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c             | 2698
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c        | 1051
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c          |  199
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c            |  294
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c             |  293
-rw-r--r--  drivers/infiniband/ulp/iser/Kconfig                   |   13
-rw-r--r--  drivers/infiniband/ulp/iser/Makefile                  |    5
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c              | 1087
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h              |  574
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c          |  746
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c             |  391
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c              |  967
-rw-r--r--  drivers/infiniband/ulp/isert/Kconfig                  |    6
-rw-r--r--  drivers/infiniband/ulp/isert/Makefile                 |    2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c               | 2668
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h               |  212
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/Kconfig               |    9
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/Makefile              |    9
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c      |  513
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h      |  524
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c    |  185
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h   |  329
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c     |  400
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c       | 1056
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c |  390
-rw-r--r--  drivers/infiniband/ulp/rtrs/Kconfig                   |   27
-rw-r--r--  drivers/infiniband/ulp/rtrs/Makefile                  |   21
-rw-r--r--  drivers/infiniband/ulp/rtrs/README                    |  213
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c          |  198
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c          |  514
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c          |   15
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h          |   86
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c                | 3180
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.h                |  251
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-log.h                |   28
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-pri.h                |  409
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c          |   51
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c          |  315
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c          |   16
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h          |   88
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.c                | 2314
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.h                |  154
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.c                    |  656
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs.h                    |  188
-rw-r--r--  drivers/infiniband/ulp/srp/Kbuild                     |    2
-rw-r--r--  drivers/infiniband/ulp/srp/Kconfig                    |   13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c                   | 4225
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h                   |  350
-rw-r--r--  drivers/infiniband/ulp/srpt/Kconfig                   |   13
-rw-r--r--  drivers/infiniband/ulp/srpt/Makefile                  |    2
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_dm_mad.h               |  139
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c                 | 3958
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h                 |  469
61 files changed, 36889 insertions, 0 deletions
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
new file mode 100644
index 000000000..4d0004b58
--- /dev/null
+++ b/drivers/infiniband/ulp/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/
+obj-$(CONFIG_INFINIBAND_SRP) += srp/
+obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
+obj-$(CONFIG_INFINIBAND_ISER) += iser/
+obj-$(CONFIG_INFINIBAND_ISERT) += isert/
+obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs/
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
new file mode 100644
index 000000000..254e31a90
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_IPOIB
+ tristate "IP-over-InfiniBand"
+ depends on NETDEVICES && INET && (IPV6 || IPV6=n)
+ help
+ Support for the IP-over-InfiniBand protocol (IPoIB). This
+ transports IP packets over InfiniBand so you can use your IB
+ device as a fancy NIC.
+
+ See Documentation/infiniband/ipoib.rst for more information.
+
+config INFINIBAND_IPOIB_CM
+ bool "IP-over-InfiniBand Connected Mode support"
+ depends on INFINIBAND_IPOIB
+ default n
+ help
+ This option enables support for IPoIB connected mode. After
+ enabling this option, you need to switch to connected mode
+ through /sys/class/net/ibXXX/mode to actually create
+ connections, and then increase the interface MTU with
+ e.g. ifconfig ib0 mtu 65520.
+
+ WARNING: Enabling connected mode will trigger some packet
+ drops for multicast and UD mode traffic from this interface,
+ unless you limit mtu for these destinations to 2044.
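+
+# Example (with a hypothetical ib0 interface name): switch to connected
+# mode, then raise the MTU:
+#   echo connected > /sys/class/net/ib0/mode
+#   ip link set ib0 mtu 65520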
+
+config INFINIBAND_IPOIB_DEBUG
+ bool "IP-over-InfiniBand debugging" if EXPERT
+ depends on INFINIBAND_IPOIB
+ default y
+ help
+ This option causes debugging code to be compiled into the
+ IPoIB driver. The output can be turned on via the
+ debug_level and mcast_debug_level module parameters (which
+ can also be set after the driver is loaded through sysfs).
+
+ This option also creates a directory tree under ipoib/ in
+ debugfs, which contains files that expose debugging
+ information about IB multicast groups used by the IPoIB
+ driver.
+
+config INFINIBAND_IPOIB_DEBUG_DATA
+ bool "IP-over-InfiniBand data path debugging"
+ depends on INFINIBAND_IPOIB_DEBUG
+ help
+ This option compiles debugging code into the data path
+ of the IPoIB driver. The output can be turned on via the
+ data_debug_level module parameter; however, even with output
+ turned off, this debugging code will have some performance
+ impact.
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
new file mode 100644
index 000000000..6ece857ed
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
+
+ib_ipoib-y := ipoib_main.o \
+ ipoib_ib.o \
+ ipoib_multicast.o \
+ ipoib_verbs.o \
+ ipoib_vlan.o \
+ ipoib_ethtool.o \
+ ipoib_netlink.o
+ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o
+ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o
+
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
new file mode 100644
index 000000000..35e9c8a33
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -0,0 +1,843 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _IPOIB_H
+#define _IPOIB_H
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
+#include <linux/if_infiniband.h>
+#include <linux/mutex.h>
+
+#include <net/neighbour.h>
+#include <net/sch_generic.h>
+
+#include <linux/atomic.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_sa.h>
+#include <linux/sched.h>
+/* constants */
+
+enum ipoib_flush_level {
+ IPOIB_FLUSH_LIGHT,
+ IPOIB_FLUSH_NORMAL,
+ IPOIB_FLUSH_HEAVY
+};
+
+enum {
+ IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
+
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
+
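+ /*
+ * A CM receive buffer is a linear head of IPOIB_CM_HEAD_SIZE bytes
+ * (the sub-page remainder of IPOIB_CM_BUF_SIZE) plus full-page
+ * fragments, for IPOIB_CM_RX_SG scatter/gather entries in total.
+ */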
+ IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
+ IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
+ IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
+ IPOIB_CM_RX_SG = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
+ IPOIB_RX_RING_SIZE = 256,
+ IPOIB_TX_RING_SIZE = 128,
+ IPOIB_MAX_QUEUE_SIZE = 8192,
+ IPOIB_MIN_QUEUE_SIZE = 2,
+ IPOIB_CM_MAX_CONN_QP = 4096,
+
+ IPOIB_NUM_WC = 4,
+
+ IPOIB_MAX_PATH_REC_QUEUE = 3,
+ IPOIB_MAX_MCAST_QUEUE = 64,
+
+ IPOIB_FLAG_OPER_UP = 0,
+ IPOIB_FLAG_INITIALIZED = 1,
+ IPOIB_FLAG_ADMIN_UP = 2,
+ IPOIB_PKEY_ASSIGNED = 3,
+ IPOIB_FLAG_SUBINTERFACE = 5,
+ IPOIB_STOP_REAPER = 7,
+ IPOIB_FLAG_ADMIN_CM = 9,
+ IPOIB_FLAG_UMCAST = 10,
+ IPOIB_NEIGH_TBL_FLUSH = 12,
+ IPOIB_FLAG_DEV_ADDR_SET = 13,
+ IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+
+ IPOIB_MAX_BACKOFF_SECONDS = 16,
+
+ IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
+ IPOIB_MCAST_FLAG_SENDONLY = 1,
+ /*
+ * IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join is in flight and mcast->mc is unreliable.
+ * When clear and mcast->mc is IS_ERR_OR_NULL, the join needs to be
+ * restarted or hasn't started yet.
+ * When clear and mcast->mc is a valid pointer, the join was successful.
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
+ IPOIB_MCAST_FLAG_ATTACHED = 3,
+
+ MAX_SEND_CQE = 64,
+ IPOIB_CM_COPYBREAK = 256,
+
+ IPOIB_NON_CHILD = 0,
+ IPOIB_LEGACY_CHILD = 1,
+ IPOIB_RTNL_CHILD = 2,
+};
+
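+/*
+ * Completion flag bits folded into the high bits of a work request ID;
+ * the low bits carry the ring index, recovered by masking these off
+ * (see e.g. ipoib_cm_handle_rx_wc()).
+ */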
+#define IPOIB_OP_RECV (1ul << 31)
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+#define IPOIB_OP_CM (1ul << 30)
+#else
+#define IPOIB_OP_CM (0)
+#endif
+
+#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
+
+/* structs */
+
+struct ipoib_header {
+ __be16 proto;
+ u16 reserved;
+};
+
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
+};
+
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
+{
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now, make room for a dummy
+ * pseudo header and set skb field accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
+}
+
+static inline struct ipoib_dev_priv *ipoib_priv(const struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ return rn->clnt_priv;
+}
+
+/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
+struct ipoib_mcast {
+ struct ib_sa_mcmember_rec mcmember;
+ struct ib_sa_multicast *mc;
+ struct ipoib_ah *ah;
+
+ struct rb_node rb_node;
+ struct list_head list;
+
+ unsigned long created;
+ unsigned long backoff;
+ unsigned long delay_until;
+
+ unsigned long flags;
+ unsigned char logcount;
+
+ struct list_head neigh_list;
+
+ struct sk_buff_head pkt_queue;
+
+ struct net_device *dev;
+ struct completion done;
+};
+
+struct ipoib_rx_buf {
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_UD_RX_SG];
+};
+
+struct ipoib_tx_buf {
+ struct sk_buff *skb;
+ u64 mapping[MAX_SKB_FRAGS + 1];
+};
+
+struct ib_cm_id;
+
+struct ipoib_cm_data {
+ __be32 qpn; /* High byte MUST be ignored on receive */
+ __be32 mtu;
+};
+
+/*
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State. The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ * drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ * to be empty or the number of Poll CQ operations has exceeded
+ * CQ capacity size;
+ * - or
+ * post another WR that completes on the same CQ and wait for this
+ * WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the second option and wait for a completion on the
+ * same CQ before destroying QPs attached to our SRQ.
+ */
+
+enum ipoib_cm_state {
+ IPOIB_CM_RX_LIVE,
+ IPOIB_CM_RX_ERROR, /* Ignored by stale task */
+ IPOIB_CM_RX_FLUSH /* Last WQE Reached event observed */
+};
+
+struct ipoib_cm_rx {
+ struct ib_cm_id *id;
+ struct ib_qp *qp;
+ struct ipoib_cm_rx_buf *rx_ring;
+ struct list_head list;
+ struct net_device *dev;
+ unsigned long jiffies;
+ enum ipoib_cm_state state;
+ int recv_count;
+};
+
+struct ipoib_cm_tx {
+ struct ib_cm_id *id;
+ struct ib_qp *qp;
+ struct list_head list;
+ struct net_device *dev;
+ struct ipoib_neigh *neigh;
+ struct ipoib_tx_buf *tx_ring;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ unsigned long flags;
+ u32 mtu;
+ unsigned int max_send_sge;
+};
+
+struct ipoib_cm_rx_buf {
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_CM_RX_SG];
+};
+
+struct ipoib_cm_dev_priv {
+ struct ib_srq *srq;
+ struct ipoib_cm_rx_buf *srq_ring;
+ struct ib_cm_id *id;
+ struct list_head passive_ids; /* state: LIVE */
+ struct list_head rx_error_list; /* state: ERROR */
+ struct list_head rx_flush_list; /* state: FLUSH, drain not started */
+ struct list_head rx_drain_list; /* state: FLUSH, drain started */
+ struct list_head rx_reap_list; /* state: FLUSH, drain done */
+ struct work_struct start_task;
+ struct work_struct reap_task;
+ struct work_struct skb_task;
+ struct work_struct rx_reap_task;
+ struct delayed_work stale_task;
+ struct sk_buff_head skb_queue;
+ struct list_head start_list;
+ struct list_head reap_list;
+ struct ib_wc ibwc[IPOIB_NUM_WC];
+ struct ib_sge rx_sge[IPOIB_CM_RX_SG];
+ struct ib_recv_wr rx_wr;
+ int nonsrq_conn_qp;
+ int max_cm_mtu;
+ int num_frags;
+};
+
+struct ipoib_ethtool_st {
+ u16 coalesce_usecs;
+ u16 max_coalesced_frames;
+};
+
+struct ipoib_neigh_table;
+
+struct ipoib_neigh_hash {
+ struct ipoib_neigh_table *ntbl;
+ struct ipoib_neigh __rcu **buckets;
+ struct rcu_head rcu;
+ u32 mask;
+ u32 size;
+};
+
+struct ipoib_neigh_table {
+ struct ipoib_neigh_hash __rcu *htbl;
+ atomic_t entries;
+ struct completion flushed;
+ struct completion deleted;
+};
+
+struct ipoib_qp_state_validate {
+ struct work_struct work;
+ struct ipoib_dev_priv *priv;
+};
+
+/*
+ * Device private locking: network stack tx_lock protects members used
+ * in the TX fast path; lock protects everything else. lock nests inside
+ * of tx_lock (i.e., tx_lock must be acquired first if needed).
+ */
+struct ipoib_dev_priv {
+ spinlock_t lock;
+
+ struct net_device *dev;
+ void (*next_priv_destructor)(struct net_device *dev);
+
+ struct napi_struct send_napi;
+ struct napi_struct recv_napi;
+
+ unsigned long flags;
+
+ /*
+ * This protects access to the child_intfs list.
+ * To READ from child_intfs, the RTNL or the vlan_rwsem read side must
+ * be held. To WRITE, the RTNL and the vlan_rwsem write side must be
+ * held (in that order). This lock exists because we have a few
+ * contexts where we need the child_intfs, but do not want to grab
+ * the RTNL.
+ */
+ struct rw_semaphore vlan_rwsem;
+ struct mutex mcast_mutex;
+
+ struct rb_root path_tree;
+ struct list_head path_list;
+
+ struct ipoib_neigh_table ntbl;
+
+ struct ipoib_mcast *broadcast;
+ struct list_head multicast_list;
+ struct rb_root multicast_tree;
+
+ struct workqueue_struct *wq;
+ struct delayed_work mcast_task;
+ struct work_struct carrier_on_task;
+ struct work_struct flush_light;
+ struct work_struct flush_normal;
+ struct work_struct flush_heavy;
+ struct work_struct restart_task;
+ struct delayed_work ah_reap_task;
+ struct delayed_work neigh_reap_task;
+ struct ib_device *ca;
+ u8 port;
+ u16 pkey;
+ u16 pkey_index;
+ struct ib_pd *pd;
+ struct ib_cq *recv_cq;
+ struct ib_cq *send_cq;
+ struct ib_qp *qp;
+ u32 qkey;
+
+ union ib_gid local_gid;
+ u32 local_lid;
+
+ unsigned int admin_mtu;
+ unsigned int mcast_mtu;
+ unsigned int max_ib_mtu;
+
+ struct ipoib_rx_buf *rx_ring;
+
+ struct ipoib_tx_buf *tx_ring;
+ /* cyclic ring variables for managing tx_ring, for UD only */
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
+ struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
+ struct ib_ud_wr tx_wr;
+ struct ib_wc send_wc[MAX_SEND_CQE];
+
+ struct ib_recv_wr rx_wr;
+ struct ib_sge rx_sge[IPOIB_UD_RX_SG];
+
+ struct ib_wc ibwc[IPOIB_NUM_WC];
+
+ struct list_head dead_ahs;
+
+ struct ib_event_handler event_handler;
+
+ struct net_device *parent;
+ struct list_head child_intfs;
+ struct list_head list;
+ int child_type;
+
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+ struct ipoib_cm_dev_priv cm;
+#endif
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ struct list_head fs_list;
+ struct dentry *mcg_dentry;
+ struct dentry *path_dentry;
+#endif
+ u64 hca_caps;
+ u64 kernel_caps;
+ struct ipoib_ethtool_st ethtool;
+ unsigned int max_send_sge;
+ const struct net_device_ops *rn_ops;
+};
+
+struct ipoib_ah {
+ struct net_device *dev;
+ struct ib_ah *ah;
+ struct list_head list;
+ struct kref ref;
+ unsigned int last_send;
+ int valid;
+};
+
+struct ipoib_path {
+ struct net_device *dev;
+ struct sa_path_rec pathrec;
+ struct ipoib_ah *ah;
+ struct sk_buff_head queue;
+
+ struct list_head neigh_list;
+
+ int query_id;
+ struct ib_sa_query *query;
+ struct completion done;
+
+ struct rb_node rb_node;
+ struct list_head list;
+};
+
+struct ipoib_neigh {
+ struct ipoib_ah *ah;
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+ struct ipoib_cm_tx *cm;
+#endif
+ u8 daddr[INFINIBAND_ALEN];
+ struct sk_buff_head queue;
+
+ struct net_device *dev;
+
+ struct list_head list;
+ struct ipoib_neigh __rcu *hnext;
+ struct rcu_head rcu;
+ refcount_t refcnt;
+ unsigned long alive;
+};
+
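+/*
+ * The UD payload MTU is the IB MTU minus the 4-byte IPoIB encapsulation
+ * header, while receive buffers must also cover the 40-byte GRH that
+ * precedes a UD payload.
+ */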
+#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
+
+void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
+static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
+{
+ if (refcount_dec_and_test(&neigh->refcnt))
+ ipoib_neigh_dtor(neigh);
+}
+struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
+struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
+ struct net_device *dev);
+void ipoib_neigh_free(struct ipoib_neigh *neigh);
+void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid);
+
+extern struct workqueue_struct *ipoib_workqueue;
+
+/* functions */
+
+int ipoib_rx_poll(struct napi_struct *napi, int budget);
+int ipoib_tx_poll(struct napi_struct *napi, int budget);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr);
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr);
+
+struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
+ struct ib_pd *pd, struct rdma_ah_attr *attr);
+void ipoib_free_ah(struct kref *kref);
+static inline void ipoib_put_ah(struct ipoib_ah *ah)
+{
+ kref_put(&ah->ref, ipoib_free_ah);
+}
+int ipoib_open(struct net_device *dev);
+void ipoib_intf_free(struct net_device *dev);
+int ipoib_add_pkey_attr(struct net_device *dev);
+int ipoib_add_umcast_attr(struct net_device *dev);
+
+int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn);
+void ipoib_reap_ah(struct work_struct *work);
+
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
+void ipoib_mark_paths_invalid(struct net_device *dev);
+void ipoib_flush_paths(struct net_device *dev);
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
+ const char *format);
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
+ struct net_device *dev);
+void ipoib_ib_tx_timer_func(struct timer_list *t);
+void ipoib_ib_dev_flush_light(struct work_struct *work);
+void ipoib_ib_dev_flush_normal(struct work_struct *work);
+void ipoib_ib_dev_flush_heavy(struct work_struct *work);
+void ipoib_pkey_event(struct work_struct *work);
+void ipoib_ib_dev_cleanup(struct net_device *dev);
+
+int ipoib_ib_dev_open_default(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev);
+void ipoib_ib_dev_stop(struct net_device *dev);
+void ipoib_ib_dev_up(struct net_device *dev);
+void ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop_default(struct net_device *dev);
+void ipoib_pkey_dev_check_presence(struct net_device *dev);
+
+void ipoib_mcast_join_task(struct work_struct *work);
+void ipoib_mcast_carrier_on_task(struct work_struct *work);
+void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
+
+void ipoib_mcast_restart_task(struct work_struct *work);
+void ipoib_mcast_start_thread(struct net_device *dev);
+void ipoib_mcast_stop_thread(struct net_device *dev);
+
+void ipoib_mcast_dev_down(struct net_device *dev);
+void ipoib_mcast_dev_flush(struct net_device *dev);
+
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req);
+
+struct rtnl_link_ops *ipoib_get_link_ops(void);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ int i, off;
+ struct sk_buff *skb = tx_req->skb;
+ skb_frag_t *frags = skb_shinfo(skb)->frags;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ u64 *mapping = tx_req->mapping;
+
+ if (skb_headlen(skb)) {
+ priv->tx_sge[0].addr = mapping[0];
+ priv->tx_sge[0].length = skb_headlen(skb);
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < nr_frags; ++i) {
+ priv->tx_sge[i + off].addr = mapping[i + off];
+ priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+ }
+ priv->tx_wr.wr.num_sge = nr_frags + off;
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
+int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
+void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
+ union ib_gid *gid,
+ unsigned long *created,
+ unsigned int *queuelen,
+ unsigned int *complete,
+ unsigned int *send_only);
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev);
+int ipoib_path_iter_next(struct ipoib_path_iter *iter);
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+ struct ipoib_path *path);
+#endif
+
+int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey);
+int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid);
+void ipoib_mcast_remove_list(struct list_head *remove_list);
+void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
+ struct list_head *remove_list);
+
+int ipoib_init_qp(struct net_device *dev);
+int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
+void ipoib_transport_dev_cleanup(struct net_device *dev);
+
+void ipoib_event(struct ib_event_handler *handler,
+ struct ib_event *record);
+
+int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
+int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
+
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+ u16 pkey, int child_type);
+
+int __init ipoib_netlink_init(void);
+void __exit ipoib_netlink_fini(void);
+
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
+int ipoib_set_mode(struct net_device *dev, const char *buf);
+
+void ipoib_setup_common(struct net_device *dev);
+
+void ipoib_pkey_open(struct ipoib_dev_priv *priv);
+void ipoib_drain_cq(struct net_device *dev);
+
+void ipoib_set_ethtool_ops(struct net_device *dev);
+
+#define IPOIB_FLAGS_RC 0x80
+#define IPOIB_FLAGS_UC 0x40
+
+/* We don't support UC connections at the moment */
+#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC))
+
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
+extern int ipoib_max_conn_qp;
+
+static inline int ipoib_cm_admin_enabled(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return IPOIB_CM_SUPPORTED(dev->dev_addr) &&
+ test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
+static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return IPOIB_CM_SUPPORTED(hwaddr) &&
+ test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
+static inline int ipoib_cm_up(struct ipoib_neigh *neigh)
+{
+ return test_bit(IPOIB_FLAG_OPER_UP, &neigh->cm->flags);
+}
+
+static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh)
+{
+ return neigh->cm;
+}
+
+static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx)
+{
+ neigh->cm = tx;
+}
+
+static inline int ipoib_cm_has_srq(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return !!priv->cm.srq;
+}
+
+static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return priv->cm.max_cm_mtu;
+}
+
+void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
+int ipoib_cm_dev_open(struct net_device *dev);
+void ipoib_cm_dev_stop(struct net_device *dev);
+int ipoib_cm_dev_init(struct net_device *dev);
+int ipoib_cm_add_mode_attr(struct net_device *dev);
+void ipoib_cm_dev_cleanup(struct net_device *dev);
+struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ struct ipoib_neigh *neigh);
+void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ unsigned int mtu);
+void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
+void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
+#else
+
+#define ipoib_max_conn_qp 0
+
+static inline int ipoib_cm_admin_enabled(struct net_device *dev)
+{
+ return 0;
+}
+static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
+{
+ return 0;
+}
+
+static inline int ipoib_cm_up(struct ipoib_neigh *neigh)
+{
+ return 0;
+}
+
+static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh)
+{
+ return NULL;
+}
+
+static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx)
+{
+}
+
+static inline int ipoib_cm_has_srq(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline
+void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
+{
+ return;
+}
+
+static inline
+int ipoib_cm_dev_open(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline
+void ipoib_cm_dev_stop(struct net_device *dev)
+{
+ return;
+}
+
+static inline
+int ipoib_cm_dev_init(struct net_device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+void ipoib_cm_dev_cleanup(struct net_device *dev)
+{
+ return;
+}
+
+static inline
+struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ struct ipoib_neigh *neigh)
+{
+ return NULL;
+}
+
+static inline
+void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
+{
+ return;
+}
+
+static inline
+int ipoib_cm_add_mode_attr(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ unsigned int mtu)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+}
+
+static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+}
+#endif
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+void ipoib_create_debug_files(struct net_device *dev);
+void ipoib_delete_debug_files(struct net_device *dev);
+void ipoib_register_debugfs(void);
+void ipoib_unregister_debugfs(void);
+#else
+static inline void ipoib_create_debug_files(struct net_device *dev) { }
+static inline void ipoib_delete_debug_files(struct net_device *dev) { }
+static inline void ipoib_register_debugfs(void) { }
+static inline void ipoib_unregister_debugfs(void) { }
+#endif
+
+#define ipoib_printk(level, priv, format, arg...) \
+ printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
+#define ipoib_warn(priv, format, arg...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ 10 * HZ /*10 seconds */, \
+ 100); \
+ if (__ratelimit(&_rs)) \
+ ipoib_printk(KERN_WARNING, priv, format , ## arg);\
+} while (0)
+
+extern int ipoib_sendq_size;
+extern int ipoib_recvq_size;
+
+extern struct ib_sa_client ipoib_sa_client;
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+extern int ipoib_debug_level;
+
+#define ipoib_dbg(priv, format, arg...) \
+ do { \
+ if (ipoib_debug_level > 0) \
+ ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ } while (0)
+#define ipoib_dbg_mcast(priv, format, arg...) \
+ do { \
+ if (mcast_debug_level > 0) \
+ ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ } while (0)
+#else /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+#define ipoib_dbg(priv, format, arg...) \
+ do { (void) (priv); } while (0)
+#define ipoib_dbg_mcast(priv, format, arg...) \
+ do { (void) (priv); } while (0)
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+#define ipoib_dbg_data(priv, format, arg...) \
+ do { \
+ if (data_debug_level > 0) \
+ ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ } while (0)
+#else /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
+#define ipoib_dbg_data(priv, format, arg...) \
+ do { (void) (priv); } while (0)
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
+
+#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
+
+#endif /* _IPOIB_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
new file mode 100644
index 000000000..b610d3629
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -0,0 +1,1661 @@
+/*
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_cm.h>
+#include <net/dst.h>
+#include <net/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+
+#include "ipoib.h"
+
+int ipoib_max_conn_qp = 128;
+
+module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
+MODULE_PARM_DESC(max_nonsrq_conn_qp,
+ "Max number of connected-mode QPs per interface "
+ "(applied only if shared receive queue is not available)");
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+static int data_debug_level;
+
+module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
+MODULE_PARM_DESC(cm_data_debug_level,
+ "Enable data path debug tracing for connected mode if > 0");
+#endif
+
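+/*
+ * The CM service ID listened on below is this IETF prefix OR'd with
+ * the local QPN (see ipoib_cm_dev_open()).
+ */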
+#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
+
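+/*
+ * A passive connection's activity timestamp is refreshed at most on
+ * every fourth receive (IPOIB_CM_RX_UPDATE_MASK), and the stale task
+ * retires connections that stay idle past these timeouts.
+ */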
+#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
+#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
+#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
+#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
+static struct ib_qp_attr ipoib_cm_err_attr = {
+ .qp_state = IB_QPS_ERR
+};
+
+#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
+
+static struct ib_send_wr ipoib_cm_rx_drain_wr = {
+ .opcode = IB_WR_SEND,
+};
+
+static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event);
+
+static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
+ u64 mapping[IPOIB_CM_RX_SG])
+{
+ int i;
+
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
+
+ for (i = 0; i < frags; ++i)
+ ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
+static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i, ret;
+
+ priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
+
+ for (i = 0; i < priv->cm.num_frags; ++i)
+ priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
+
+ ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
+ ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
+ priv->cm.srq_ring[id].mapping);
+ dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
+ priv->cm.srq_ring[id].skb = NULL;
+ }
+
+ return ret;
+}
+
+static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
+ struct ipoib_cm_rx *rx,
+ struct ib_recv_wr *wr,
+ struct ib_sge *sge, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i, ret;
+
+ wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
+
+ for (i = 0; i < IPOIB_CM_RX_SG; ++i)
+ sge[i].addr = rx->rx_ring[id].mapping[i];
+
+ ret = ib_post_recv(rx->qp, wr, NULL);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
+ ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+ rx->rx_ring[id].mapping);
+ dev_kfree_skb_any(rx->rx_ring[id].skb);
+ rx->rx_ring[id].skb = NULL;
+ }
+
+ return ret;
+}
+
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+ struct ipoib_cm_rx_buf *rx_ring,
+ int id, int frags,
+ u64 mapping[IPOIB_CM_RX_SG],
+ gfp_t gfp)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+ * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
+ * IP header to a multiple of 16.
+ */
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
+
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ for (i = 0; i < frags; i++) {
+ struct page *page = alloc_page(gfp);
+
+ if (!page)
+ goto partial_error;
+ skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+
+ mapping[i + 1] = ib_dma_map_page(priv->ca, page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
+ goto partial_error;
+ }
+
+ rx_ring[id].skb = skb;
+ return skb;
+
+partial_error:
+
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
+
+ for (; i > 0; --i)
+ ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static void ipoib_cm_free_rx_ring(struct net_device *dev,
+ struct ipoib_cm_rx_buf *rx_ring)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < ipoib_recvq_size; ++i)
+ if (rx_ring[i].skb) {
+ ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+ rx_ring[i].mapping);
+ dev_kfree_skb_any(rx_ring[i].skb);
+ }
+
+ vfree(rx_ring);
+}
+
+static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_cm_rx *p;
+
+ /* We only reserved 1 extra slot in CQ for drain WRs, so
+ * make sure we have at most 1 outstanding WR. */
+ if (list_empty(&priv->cm.rx_flush_list) ||
+ !list_empty(&priv->cm.rx_drain_list))
+ return;
+
+ /*
+ * QPs on flush list are error state. This way, a "flush
+ * error" WC will be immediately generated for each WR we post.
+ */
+ p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
+ ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
+ if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
+ ipoib_warn(priv, "failed to post drain wr\n");
+
+ list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
+}
+
+static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
+{
+ struct ipoib_cm_rx *p = ctx;
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned long flags;
+
+ if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
+ return;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_move(&p->list, &priv->cm.rx_flush_list);
+ p->state = IPOIB_CM_RX_FLUSH;
+ ipoib_cm_start_rx_drain(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
+ struct ipoib_cm_rx *p)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_init_attr attr = {
+ .event_handler = ipoib_cm_rx_event_handler,
+ .send_cq = priv->recv_cq, /* For drain WR */
+ .recv_cq = priv->recv_cq,
+ .srq = priv->cm.srq,
+ .cap.max_send_wr = 1, /* For drain WR */
+ .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
+ .sq_sig_type = IB_SIGNAL_ALL_WR,
+ .qp_type = IB_QPT_RC,
+ .qp_context = p,
+ };
+
+ if (!ipoib_cm_has_srq(dev)) {
+ attr.cap.max_recv_wr = ipoib_recvq_size;
+ attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
+ }
+
+ return ib_create_qp(priv->pd, &attr);
+}
+
+static int ipoib_cm_modify_rx_qp(struct net_device *dev,
+ struct ib_cm_id *cm_id, struct ib_qp *qp,
+ unsigned int psn)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
+ return ret;
+ }
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
+ return ret;
+ }
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+ return ret;
+ }
+ qp_attr.rq_psn = psn;
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Current Mellanox HCA firmware won't generate completions
+ * with error for drain WRs unless the QP has been moved to
+ * RTS first. This work-around leaves a window where a QP has
+ * moved to error asynchronously, but this will eventually get
+ * fixed in firmware, so let's not error out if modify QP
+ * fails.
+ */
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ return 0;
+ }
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void ipoib_cm_init_rx_wr(struct net_device *dev,
+ struct ib_recv_wr *wr,
+ struct ib_sge *sge)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < priv->cm.num_frags; ++i)
+ sge[i].lkey = priv->pd->local_dma_lkey;
+
+ sge[0].length = IPOIB_CM_HEAD_SIZE;
+ for (i = 1; i < priv->cm.num_frags; ++i)
+ sge[i].length = PAGE_SIZE;
+
+ wr->next = NULL;
+ wr->sg_list = sge;
+ wr->num_sge = priv->cm.num_frags;
+}
+
+static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
+ struct ipoib_cm_rx *rx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct {
+ struct ib_recv_wr wr;
+ struct ib_sge sge[IPOIB_CM_RX_SG];
+ } *t;
+ int ret;
+ int i;
+
+ rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
+ sizeof(*rx->rx_ring)));
+ if (!rx->rx_ring)
+ return -ENOMEM;
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ ret = -ENOMEM;
+ goto err_free_1;
+ }
+
+ ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
+
+ spin_lock_irq(&priv->lock);
+
+ if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
+ spin_unlock_irq(&priv->lock);
+ ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
+ ret = -EINVAL;
+ goto err_free;
+ } else
+ ++priv->cm.nonsrq_conn_qp;
+
+ spin_unlock_irq(&priv->lock);
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
+ rx->rx_ring[i].mapping,
+ GFP_KERNEL)) {
+ ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+ ret = -ENOMEM;
+ goto err_count;
+ }
+ ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
+ "failed for buf %d\n", i);
+ ret = -EIO;
+ goto err_count;
+ }
+ }
+
+ rx->recv_count = ipoib_recvq_size;
+
+ kfree(t);
+
+ return 0;
+
+err_count:
+ spin_lock_irq(&priv->lock);
+ --priv->cm.nonsrq_conn_qp;
+ spin_unlock_irq(&priv->lock);
+
+err_free:
+ kfree(t);
+
+err_free_1:
+ ipoib_cm_free_rx_ring(dev, rx->rx_ring);
+
+ return ret;
+}
+
+static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
+ struct ib_qp *qp,
+ const struct ib_cm_req_event_param *req,
+ unsigned int psn)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_data data = {};
+ struct ib_cm_rep_param rep = {};
+
+ data.qpn = cpu_to_be32(priv->qp->qp_num);
+ data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
+
+ rep.private_data = &data;
+ rep.private_data_len = sizeof(data);
+ rep.flow_control = 0;
+ rep.rnr_retry_count = req->rnr_retry_count;
+ rep.srq = ipoib_cm_has_srq(dev);
+ rep.qp_num = qp->qp_num;
+ rep.starting_psn = psn;
+ return ib_send_cm_rep(cm_id, &rep);
+}
+
+static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct net_device *dev = cm_id->context;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx *p;
+ unsigned int psn;
+ int ret;
+
+ ipoib_dbg(priv, "REQ arrived\n");
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ p->dev = dev;
+ p->id = cm_id;
+ cm_id->context = p;
+ p->state = IPOIB_CM_RX_LIVE;
+ p->jiffies = jiffies;
+ INIT_LIST_HEAD(&p->list);
+
+ p->qp = ipoib_cm_create_rx_qp(dev, p);
+ if (IS_ERR(p->qp)) {
+ ret = PTR_ERR(p->qp);
+ goto err_qp;
+ }
+
+ psn = get_random_u32() & 0xffffff;
+ ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
+ if (ret)
+ goto err_modify;
+
+ if (!ipoib_cm_has_srq(dev)) {
+ ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
+ if (ret)
+ goto err_modify;
+ }
+
+ spin_lock_irq(&priv->lock);
+ queue_delayed_work(priv->wq,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+ /* Add this entry to passive ids list head, but do not re-add it
+ * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
+ p->jiffies = jiffies;
+ if (p->state == IPOIB_CM_RX_LIVE)
+ list_move(&p->list, &priv->cm.passive_ids);
+ spin_unlock_irq(&priv->lock);
+
+ ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
+ if (ret) {
+ ipoib_warn(priv, "failed to send REP: %d\n", ret);
+ if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+ ipoib_warn(priv, "unable to move qp to error state\n");
+ }
+ return 0;
+
+err_modify:
+ ib_destroy_qp(p->qp);
+err_qp:
+ kfree(p);
+ return ret;
+}
+
+static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct ipoib_cm_rx *p;
+ struct ipoib_dev_priv *priv;
+
+ switch (event->event) {
+ case IB_CM_REQ_RECEIVED:
+ return ipoib_cm_req_handler(cm_id, event);
+ case IB_CM_DREQ_RECEIVED:
+ ib_send_cm_drep(cm_id, NULL, 0);
+ fallthrough;
+ case IB_CM_REJ_RECEIVED:
+ p = cm_id->context;
+ priv = ipoib_priv(p->dev);
+ if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+ ipoib_warn(priv, "unable to move qp to error state\n");
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+
+/* Adjust length of skb with fragments to match received data */
+static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ unsigned int length, struct sk_buff *toskb)
+{
+ int i, num_frags;
+ unsigned int size;
+
+ /* put header into skb */
+ size = min(length, hdr_space);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (length == 0) {
+ /* don't need this page */
+ skb_fill_page_desc(toskb, i, skb_frag_page(frag),
+ 0, PAGE_SIZE);
+ --skb_shinfo(skb)->nr_frags;
+ } else {
+ size = min_t(unsigned int, length, PAGE_SIZE);
+
+ skb_frag_size_set(frag, size);
+ skb->data_len += size;
+ skb->truesize += size;
+ skb->len += size;
+ length -= size;
+ }
+ }
+}
+
+void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx_buf *rx_ring;
+ unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
+ struct sk_buff *skb, *newskb;
+ struct ipoib_cm_rx *p;
+ unsigned long flags;
+ u64 mapping[IPOIB_CM_RX_SG];
+ int frags;
+ int has_srq;
+ struct sk_buff *small_skb;
+
+ ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_recvq_size)) {
+ if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
+ ipoib_cm_start_rx_drain(priv);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else
+ ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_recvq_size);
+ return;
+ }
+
+ p = wc->qp->qp_context;
+
+ has_srq = ipoib_cm_has_srq(dev);
+ rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
+
+ skb = rx_ring[wr_id].skb;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ ipoib_dbg(priv,
+ "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ ++dev->stats.rx_dropped;
+ if (has_srq)
+ goto repost;
+ else {
+ if (!--p->recv_count) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_move(&p->list, &priv->cm.rx_reap_list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
+ }
+ return;
+ }
+ }
+
+ if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
+ if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ p->jiffies = jiffies;
+ /* Move this entry to list head, but do not re-add it
+ * if it has been moved out of list. */
+ if (p->state == IPOIB_CM_RX_LIVE)
+ list_move(&p->list, &priv->cm.passive_ids);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ if (wc->byte_len < IPOIB_CM_COPYBREAK) {
+ int dlen = wc->byte_len;
+
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
+ if (small_skb) {
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
+ ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
+ dlen, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, small_skb->data, dlen);
+ ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
+ dlen, DMA_FROM_DEVICE);
+ skb_put(small_skb, dlen);
+ skb = small_skb;
+ goto copied;
+ }
+ }
+
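+ /* Bytes beyond the linear head arrive in page-sized fragments. */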
+ frags = PAGE_ALIGN(wc->byte_len -
+ min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
+ PAGE_SIZE;
+
+ newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
+ mapping, GFP_ATOMIC);
+ if (unlikely(!newskb)) {
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
+
+ ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
+ memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
+
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
+
+ skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
+
+copied:
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+
+ skb->dev = dev;
+ /* XXX get correct PACKET_ type here */
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
+
+repost:
+ if (has_srq) {
+ if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
+ ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
+ "for buf %d\n", wr_id);
+ } else {
+ if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
+ &priv->cm.rx_wr,
+ priv->cm.rx_sge,
+ wr_id))) {
+ --p->recv_count;
+ ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
+ "for buf %d\n", wr_id);
+ }
+ }
+}
+
+static inline int post_send(struct ipoib_dev_priv *priv,
+ struct ipoib_cm_tx *tx,
+ unsigned int wr_id,
+ struct ipoib_tx_buf *tx_req)
+{
+ ipoib_build_sge(priv, tx_req);
+
+ priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
+
+ return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
+}
+
+void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_tx_buf *tx_req;
+ int rc;
+ unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
+
+ if (unlikely(skb->len > tx->mtu)) {
+ ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len, tx->mtu);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
+ return;
+ }
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
+ ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
+ tx->tx_head, skb->len, tx->qp->qp_num);
+
+ /*
+ * We put the skb into the tx_ring _before_ we call post_send()
+ * because it's entirely possible that the completion handler will
+ * run before we execute anything after the post_send(). That
+ * means we have to make sure everything is properly recorded and
+ * our state is consistent before we call post_send().
+ */
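+ /* ipoib_sendq_size is a power of two, so masking gives the ring slot. */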
+ tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
+ tx_req->skb = skb;
+
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+ netif_stop_queue(dev);
+ }
+
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
+ ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
+ napi_schedule(&priv->send_napi);
+ }
+
+ rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
+ if (unlikely(rc)) {
+ ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
+ ++dev->stats.tx_errors;
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(skb);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } else {
+ netif_trans_update(dev);
+ ++tx->tx_head;
+ ++priv->global_tx_head;
+ }
+}
+
+void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_tx *tx = wc->qp->qp_context;
+ unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
+ struct ipoib_tx_buf *tx_req;
+ unsigned long flags;
+
+ ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_sendq_size)) {
+ ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_sendq_size);
+ return;
+ }
+
+ tx_req = &tx->tx_ring[wr_id];
+
+ ipoib_dma_unmap_tx(priv, tx_req);
+
+ /* FIXME: is this right? Shouldn't we only increment on success? */
+ ++dev->stats.tx_packets;
+ dev->stats.tx_bytes += tx_req->skb->len;
+
+ dev_kfree_skb_any(tx_req->skb);
+
+ netif_tx_lock(dev);
+
+ ++tx->tx_tail;
+ ++priv->global_tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
+ netif_wake_queue(dev);
+
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct ipoib_neigh *neigh;
+
+ /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
+ * so don't make waves.
+ */
+ if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
+ wc->status == IB_WC_RETRY_EXC_ERR)
+ ipoib_dbg(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
+ else
+ ipoib_warn(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ neigh = tx->neigh;
+
+ if (neigh) {
+ neigh->cm = NULL;
+ ipoib_neigh_free(neigh);
+
+ tx->neigh = NULL;
+ }
+
+ if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ list_move(&tx->list, &priv->cm.reap_list);
+ queue_work(priv->wq, &priv->cm.reap_task);
+ }
+
+ clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ netif_tx_unlock(dev);
+}
+
+int ipoib_cm_dev_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
+ return 0;
+
+ priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
+ if (IS_ERR(priv->cm.id)) {
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
+ ret = PTR_ERR(priv->cm.id);
+ goto err_cm;
+ }
+
+ ret = ib_cm_listen(priv->cm.id,
+ cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num));
+ if (ret) {
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ goto err_listen;
+ }
+
+ return 0;
+
+err_listen:
+ ib_destroy_cm_id(priv->cm.id);
+err_cm:
+ priv->cm.id = NULL;
+ return ret;
+}
+
+static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx *rx, *n;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&priv->lock);
+ list_splice_init(&priv->cm.rx_reap_list, &list);
+ spin_unlock_irq(&priv->lock);
+
+ list_for_each_entry_safe(rx, n, &list, list) {
+ ib_destroy_cm_id(rx->id);
+ ib_destroy_qp(rx->qp);
+ if (!ipoib_cm_has_srq(dev)) {
+ ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
+ spin_lock_irq(&priv->lock);
+ --priv->cm.nonsrq_conn_qp;
+ spin_unlock_irq(&priv->lock);
+ }
+ kfree(rx);
+ }
+}
+
+void ipoib_cm_dev_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx *p;
+ unsigned long begin;
+ int ret;
+
+ if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
+ return;
+
+ ib_destroy_cm_id(priv->cm.id);
+ priv->cm.id = NULL;
+
+ spin_lock_irq(&priv->lock);
+ while (!list_empty(&priv->cm.passive_ids)) {
+ p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
+ list_move(&p->list, &priv->cm.rx_error_list);
+ p->state = IPOIB_CM_RX_ERROR;
+ spin_unlock_irq(&priv->lock);
+ ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
+ if (ret)
+ ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
+ spin_lock_irq(&priv->lock);
+ }
+
+ /* Wait for all RX to be drained */
+ begin = jiffies;
+
+ while (!list_empty(&priv->cm.rx_error_list) ||
+ !list_empty(&priv->cm.rx_flush_list) ||
+ !list_empty(&priv->cm.rx_drain_list)) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv, "RX drain timing out\n");
+
+ /*
+ * assume the HW is wedged and just free up everything.
+ */
+ list_splice_init(&priv->cm.rx_flush_list,
+ &priv->cm.rx_reap_list);
+ list_splice_init(&priv->cm.rx_error_list,
+ &priv->cm.rx_reap_list);
+ list_splice_init(&priv->cm.rx_drain_list,
+ &priv->cm.rx_reap_list);
+ break;
+ }
+ spin_unlock_irq(&priv->lock);
+ usleep_range(1000, 2000);
+ ipoib_drain_cq(dev);
+ spin_lock_irq(&priv->lock);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ ipoib_cm_free_rx_reap_list(dev);
+
+ cancel_delayed_work(&priv->cm.stale_task);
+}
+
+static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct ipoib_cm_tx *p = cm_id->context;
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ struct ipoib_cm_data *data = event->private_data;
+ struct sk_buff_head skqueue;
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+ struct sk_buff *skb;
+
+ p->mtu = be32_to_cpu(data->mtu);
+
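+	/*
+	 * The peer advertises its receive buffer size in the REP private
+	 * data; a connection that cannot even carry the IPoIB encapsulation
+	 * header is unusable.
+	 */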
+ if (p->mtu <= IPOIB_ENCAP_LEN) {
+ ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+ p->mtu, IPOIB_ENCAP_LEN);
+ return -EINVAL;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+ return ret;
+ }
+
+ qp_attr.rq_psn = 0 /* FIXME */;
+ ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+ return ret;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ return ret;
+ }
+ ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ return ret;
+ }
+
+ skb_queue_head_init(&skqueue);
+
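+	/*
+	 * Packets queued on the neigh while the connection was being set
+	 * up are moved to a private queue under the locks and then
+	 * retransmitted once the locks are dropped.
+	 */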
+ netif_tx_lock_bh(p->dev);
+ spin_lock_irq(&priv->lock);
+ set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
+ if (p->neigh)
+ while ((skb = __skb_dequeue(&p->neigh->queue)))
+ __skb_queue_tail(&skqueue, skb);
+ spin_unlock_irq(&priv->lock);
+ netif_tx_unlock_bh(p->dev);
+
+ while ((skb = __skb_dequeue(&skqueue))) {
+ skb->dev = p->dev;
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
+ }
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ if (ret) {
+ ipoib_warn(priv, "failed to send RTU: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_init_attr attr = {
+ .send_cq = priv->send_cq,
+ .recv_cq = priv->recv_cq,
+ .srq = priv->cm.srq,
+ .cap.max_send_wr = ipoib_sendq_size,
+ .cap.max_send_sge = 1,
+ .sq_sig_type = IB_SIGNAL_ALL_WR,
+ .qp_type = IB_QPT_RC,
+ .qp_context = tx,
+ .create_flags = 0
+ };
+ struct ib_qp *tx_qp;
+
+ if (dev->features & NETIF_F_SG)
+ attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1);
+
+ tx_qp = ib_create_qp(priv->pd, &attr);
+ tx->max_send_sge = attr.cap.max_send_sge;
+ return tx_qp;
+}
+
+static int ipoib_cm_send_req(struct net_device *dev,
+ struct ib_cm_id *id, struct ib_qp *qp,
+ u32 qpn,
+ struct sa_path_rec *pathrec)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_data data = {};
+ struct ib_cm_req_param req = {};
+
+ data.qpn = cpu_to_be32(priv->qp->qp_num);
+ data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
+
+ req.primary_path = pathrec;
+ req.alternate_path = NULL;
+ req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
+ req.qp_num = qp->qp_num;
+ req.qp_type = qp->qp_type;
+ req.private_data = &data;
+ req.private_data_len = sizeof(data);
+ req.flow_control = 0;
+
+ req.starting_psn = 0; /* FIXME */
+
+ /*
+ * Pick some arbitrary defaults here; we could make these
+ * module parameters if anyone cared about setting them.
+ */
+ req.responder_resources = 4;
+ req.remote_cm_response_timeout = 20;
+ req.local_cm_response_timeout = 20;
+ req.retry_count = 0; /* RFC draft warns against retries */
+ req.rnr_retry_count = 0; /* RFC draft warns against retries */
+ req.max_cm_retries = 15;
+ req.srq = ipoib_cm_has_srq(dev);
+ return ib_send_cm_req(id, &req);
+}
+
+static int ipoib_cm_modify_tx_init(struct net_device *dev,
+ struct ib_cm_id *cm_id, struct ib_qp *qp)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+
+ qp_attr.pkey_index = priv->pkey_index;
+ qp_attr.qp_state = IB_QPS_INIT;
+ qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ qp_attr.port_num = priv->port;
+ qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
+ struct sa_path_rec *pathrec)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned int noio_flag;
+ int ret;
+
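+	/*
+	 * This can be reached while memory is being reclaimed, so use
+	 * memalloc_noio_save() to keep the ring and QP allocations below
+	 * from recursing into I/O.
+	 */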
+ noio_flag = memalloc_noio_save();
+ p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
+ if (!p->tx_ring) {
+ memalloc_noio_restore(noio_flag);
+ ret = -ENOMEM;
+ goto err_tx;
+ }
+
+ p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+ memalloc_noio_restore(noio_flag);
+ if (IS_ERR(p->qp)) {
+ ret = PTR_ERR(p->qp);
+ ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
+ goto err_qp;
+ }
+
+ p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
+ if (IS_ERR(p->id)) {
+ ret = PTR_ERR(p->id);
+ ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
+ goto err_id;
+ }
+
+ ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
+ goto err_modify_send;
+ }
+
+ ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
+ if (ret) {
+ ipoib_warn(priv, "failed to send cm req: %d\n", ret);
+ goto err_modify_send;
+ }
+
+ ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
+ p->qp->qp_num, pathrec->dgid.raw, qpn);
+
+ return 0;
+
+err_modify_send:
+ ib_destroy_cm_id(p->id);
+err_id:
+ p->id = NULL;
+ ib_destroy_qp(p->qp);
+err_qp:
+ p->qp = NULL;
+ vfree(p->tx_ring);
+err_tx:
+ return ret;
+}
+
+static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ struct ipoib_tx_buf *tx_req;
+ unsigned long begin;
+
+ ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
+ p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
+
+ if (p->id)
+ ib_destroy_cm_id(p->id);
+
+ if (p->tx_ring) {
+ /* Wait for all sends to complete */
+ begin = jiffies;
+ while ((int) p->tx_tail - (int) p->tx_head < 0) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv, "timing out; %d sends not completed\n",
+ p->tx_head - p->tx_tail);
+ goto timeout;
+ }
+
+ usleep_range(1000, 2000);
+ }
+ }
+
+timeout:
+
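+	/*
+	 * Forcibly reclaim anything still outstanding; the mask below
+	 * relies on ipoib_sendq_size being a power of two to wrap the
+	 * tail index.
+	 */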
+ while ((int) p->tx_tail - (int) p->tx_head < 0) {
+ tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(tx_req->skb);
+ netif_tx_lock_bh(p->dev);
+ ++p->tx_tail;
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ netif_queue_stopped(p->dev) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ netif_wake_queue(p->dev);
+ netif_tx_unlock_bh(p->dev);
+ }
+
+ if (p->qp)
+ ib_destroy_qp(p->qp);
+
+ vfree(p->tx_ring);
+ kfree(p);
+}
+
+static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct ipoib_cm_tx *tx = cm_id->context;
+ struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
+ struct net_device *dev = priv->dev;
+ struct ipoib_neigh *neigh;
+ unsigned long flags;
+ int ret;
+
+ switch (event->event) {
+ case IB_CM_DREQ_RECEIVED:
+ ipoib_dbg(priv, "DREQ received.\n");
+ ib_send_cm_drep(cm_id, NULL, 0);
+ break;
+ case IB_CM_REP_RECEIVED:
+ ipoib_dbg(priv, "REP received.\n");
+ ret = ipoib_cm_rep_handler(cm_id, event);
+ if (ret)
+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+ NULL, 0, NULL, 0);
+ break;
+ case IB_CM_REQ_ERROR:
+ case IB_CM_REJ_RECEIVED:
+ case IB_CM_TIMEWAIT_EXIT:
+ ipoib_dbg(priv, "CM error %d.\n", event->event);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ neigh = tx->neigh;
+
+ if (neigh) {
+ neigh->cm = NULL;
+ ipoib_neigh_free(neigh);
+
+ tx->neigh = NULL;
+ }
+
+ if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ list_move(&tx->list, &priv->cm.reap_list);
+ queue_work(priv->wq, &priv->cm.reap_task);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ struct ipoib_neigh *neigh)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_tx *tx;
+
+ tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
+ if (!tx)
+ return NULL;
+
+ neigh->cm = tx;
+ tx->neigh = neigh;
+ tx->dev = dev;
+ list_add(&tx->list, &priv->cm.start_list);
+ set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
+ queue_work(priv->wq, &priv->cm.start_task);
+ return tx;
+}
+
+void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
+ unsigned long flags;
+ if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_move(&tx->list, &priv->cm.reap_list);
+ queue_work(priv->wq, &priv->cm.reap_task);
+ ipoib_dbg(priv, "Reap connection for gid %pI6\n",
+ tx->neigh->daddr + 4);
+ tx->neigh = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+}
+
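+/*
+ * The IPoIB hardware address starts with a flags byte and the 24-bit
+ * QPN, followed by the 16-byte port GID, so the GID used for the path
+ * lookup below begins 4 bytes into neigh->daddr.
+ */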
+#define QPN_AND_OPTIONS_OFFSET 4
+
+static void ipoib_cm_tx_start(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.start_task);
+ struct net_device *dev = priv->dev;
+ struct ipoib_neigh *neigh;
+ struct ipoib_cm_tx *p;
+ unsigned long flags;
+ struct ipoib_path *path;
+ int ret;
+
+ struct sa_path_rec pathrec;
+ u32 qpn;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while (!list_empty(&priv->cm.start_list)) {
+ p = list_entry(priv->cm.start_list.next, typeof(*p), list);
+ list_del_init(&p->list);
+ neigh = p->neigh;
+
+ qpn = IPOIB_QPN(neigh->daddr);
+ /*
+		 * As long as the search is done under these two locks,
+		 * the path's existence indicates that it is still valid.
+ */
+ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ if (!path) {
+ pr_info("%s ignore not valid path %pI6\n",
+ __func__,
+ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
+ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
+ ret = ipoib_cm_tx_init(p, qpn, &pathrec);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ret) {
+free_neigh:
+ neigh = p->neigh;
+ if (neigh) {
+ neigh->cm = NULL;
+ ipoib_neigh_free(neigh);
+ }
+ list_del(&p->list);
+ kfree(p);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
+static void ipoib_cm_tx_reap(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.reap_task);
+ struct net_device *dev = priv->dev;
+ struct ipoib_cm_tx *p;
+ unsigned long flags;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while (!list_empty(&priv->cm.reap_list)) {
+ p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
+ list_del_init(&p->list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ ipoib_cm_tx_destroy(p);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
+static void ipoib_cm_skb_reap(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.skb_task);
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+ unsigned int mtu = priv->mcast_mtu;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
+#endif
+ dev_kfree_skb_any(skb);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ unsigned int mtu)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int e = skb_queue_empty(&priv->cm.skb_queue);
+
+ skb_dst_update_pmtu(skb, mtu);
+
+ skb_queue_tail(&priv->cm.skb_queue, skb);
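+	/*
+	 * Only kick the worker on the empty -> non-empty transition;
+	 * ipoib_cm_skb_reap() drains the whole queue in one run.
+	 */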
+ if (e)
+ queue_work(priv->wq, &priv->cm.skb_task);
+}
+
+static void ipoib_cm_rx_reap(struct work_struct *work)
+{
+ ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
+ cm.rx_reap_task)->dev);
+}
+
+static void ipoib_cm_stale_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.stale_task.work);
+ struct ipoib_cm_rx *p;
+ int ret;
+
+ spin_lock_irq(&priv->lock);
+ while (!list_empty(&priv->cm.passive_ids)) {
+		/*
+		 * The list is sorted by LRU; start from the tail and
+		 * stop when we see a recently used entry.
+		 */
+ p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
+ if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
+ break;
+ list_move(&p->list, &priv->cm.rx_error_list);
+ p->state = IPOIB_CM_RX_ERROR;
+ spin_unlock_irq(&priv->lock);
+ ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
+ if (ret)
+ ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
+ spin_lock_irq(&priv->lock);
+ }
+
+ if (!list_empty(&priv->cm.passive_ids))
+ queue_delayed_work(priv->wq,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+ spin_unlock_irq(&priv->lock);
+}
+
+static ssize_t mode_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
+ return sysfs_emit(buf, "connected\n");
+ else
+ return sysfs_emit(buf, "datagram\n");
+}
+
+static ssize_t mode_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *dev = to_net_dev(d);
+ int ret;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ret = ipoib_set_mode(dev, buf);
+
+	/* ipoib_set_mode() is assumed to return with the rtnl lock still
+	 * held unless it returned -EBUSY, in which case it has already
+	 * dropped the lock and no rtnl_unlock() is needed here.
+	 */
+ if (ret != -EBUSY)
+ rtnl_unlock();
+
+ return (!ret || ret == -EBUSY) ? count : ret;
+}
+
+static DEVICE_ATTR_RW(mode);
+
+int ipoib_cm_add_mode_attr(struct net_device *dev)
+{
+ return device_create_file(&dev->dev, &dev_attr_mode);
+}
+
+static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_srq_init_attr srq_init_attr = {
+ .srq_type = IB_SRQT_BASIC,
+ .attr = {
+ .max_wr = ipoib_recvq_size,
+ .max_sge = max_sge
+ }
+ };
+
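+	/*
+	 * If the HCA cannot create an SRQ (-EOPNOTSUPP below), leave
+	 * priv->cm.srq NULL and fall back to per-connection receive rings;
+	 * connected-mode SRQ support is keyed off this pointer.
+	 */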
+ priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
+ if (IS_ERR(priv->cm.srq)) {
+ if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
+ priv->ca->name, PTR_ERR(priv->cm.srq));
+ priv->cm.srq = NULL;
+ return;
+ }
+
+ priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size,
+ sizeof(*priv->cm.srq_ring)));
+ if (!priv->cm.srq_ring) {
+ ib_destroy_srq(priv->cm.srq);
+ priv->cm.srq = NULL;
+ return;
+ }
+}
+
+int ipoib_cm_dev_init(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int max_srq_sge, i;
+ u8 addr;
+
+ INIT_LIST_HEAD(&priv->cm.passive_ids);
+ INIT_LIST_HEAD(&priv->cm.reap_list);
+ INIT_LIST_HEAD(&priv->cm.start_list);
+ INIT_LIST_HEAD(&priv->cm.rx_error_list);
+ INIT_LIST_HEAD(&priv->cm.rx_flush_list);
+ INIT_LIST_HEAD(&priv->cm.rx_drain_list);
+ INIT_LIST_HEAD(&priv->cm.rx_reap_list);
+ INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
+ INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
+ INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
+ INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
+ INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
+
+ skb_queue_head_init(&priv->cm.skb_queue);
+
+ ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
+
+ max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
+ ipoib_cm_create_srq(dev, max_srq_sge);
+ if (ipoib_cm_has_srq(dev)) {
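+		/*
+		 * Each SRQ scatter/gather entry covers at most one page, and
+		 * 0x10 bytes of headroom appear to be reserved ahead of the
+		 * payload, which bounds the largest connected-mode MTU.
+		 */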
+ priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
+ priv->cm.num_frags = max_srq_sge;
+ ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
+ priv->cm.max_cm_mtu, priv->cm.num_frags);
+ } else {
+ priv->cm.max_cm_mtu = IPOIB_CM_MTU;
+ priv->cm.num_frags = IPOIB_CM_RX_SG;
+ }
+
+ ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
+
+ if (ipoib_cm_has_srq(dev)) {
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
+ priv->cm.num_frags - 1,
+ priv->cm.srq_ring[i].mapping,
+ GFP_KERNEL)) {
+ ipoib_warn(priv, "failed to allocate "
+ "receive buffer %d\n", i);
+ ipoib_cm_dev_cleanup(dev);
+ return -ENOMEM;
+ }
+
+ if (ipoib_cm_post_receive_srq(dev, i)) {
+ ipoib_warn(priv, "ipoib_cm_post_receive_srq "
+ "failed for buf %d\n", i);
+ ipoib_cm_dev_cleanup(dev);
+ return -EIO;
+ }
+ }
+ }
+
+ addr = IPOIB_FLAGS_RC;
+ dev_addr_mod(dev, 0, &addr, 1);
+ return 0;
+}
+
+void ipoib_cm_dev_cleanup(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->cm.srq)
+ return;
+
+ ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
+
+ ib_destroy_srq(priv->cm.srq);
+ priv->cm.srq = NULL;
+ if (!priv->cm.srq_ring)
+ return;
+
+ ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
+ priv->cm.srq_ring = NULL;
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
new file mode 100644
index 000000000..8af99b18d
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ipoib.h"
+
+struct ipoib_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define IPOIB_NETDEV_STAT(m) { \
+ .stat_string = #m, \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct ipoib_stats ipoib_gstrings_stats[] = {
+ IPOIB_NETDEV_STAT(rx_packets),
+ IPOIB_NETDEV_STAT(tx_packets),
+ IPOIB_NETDEV_STAT(rx_bytes),
+ IPOIB_NETDEV_STAT(tx_bytes),
+ IPOIB_NETDEV_STAT(tx_errors),
+ IPOIB_NETDEV_STAT(rx_dropped),
+ IPOIB_NETDEV_STAT(tx_dropped),
+ IPOIB_NETDEV_STAT(multicast),
+};
+
+#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
+
+static void ipoib_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
+
+ ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
+
+ strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
+ sizeof(drvinfo->bus_info));
+
+ strscpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
+}
+
+static int ipoib_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+ coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+
+ return 0;
+}
+
+static int ipoib_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ /*
+ * These values are saved in the private data and returned
+ * when ipoib_get_coalesce() is called
+ */
+ if (coal->rx_coalesce_usecs > 0xffff ||
+ coal->rx_max_coalesced_frames > 0xffff)
+ return -EINVAL;
+
+ ret = rdma_set_cq_moderation(priv->recv_cq,
+ coal->rx_max_coalesced_frames,
+ coal->rx_coalesce_usecs);
+ if (ret && ret != -EOPNOTSUPP) {
+ ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
+ return ret;
+ }
+
+ priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
+ priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ int i;
+ struct net_device_stats *net_stats = &dev->stats;
+ u8 *p = (u8 *)net_stats;
+
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+ data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
+}
+
+static void ipoib_get_strings(struct net_device __always_unused *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ipoib_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ipoib_get_sset_count(struct net_device __always_unused *dev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IPOIB_GLOBAL_STATS_LEN;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/* Return the lane speed in units of 1e6 bit/sec */
+static inline int ib_speed_enum_to_int(int speed)
+{
+ switch (speed) {
+ case IB_SPEED_SDR:
+ return SPEED_2500;
+ case IB_SPEED_DDR:
+ return SPEED_5000;
+ case IB_SPEED_QDR:
+ case IB_SPEED_FDR10:
+ return SPEED_10000;
+ case IB_SPEED_FDR:
+ return SPEED_14000;
+ case IB_SPEED_EDR:
+ return SPEED_25000;
+ case IB_SPEED_HDR:
+ return SPEED_50000;
+ case IB_SPEED_NDR:
+ return SPEED_100000;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+static int ipoib_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
+ struct ib_port_attr attr;
+ int ret, speed, width;
+
+ if (!netif_carrier_ok(netdev)) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+
+ ret = ib_query_port(priv->ca, priv->port, &attr);
+ if (ret < 0)
+ return -EINVAL;
+
+ speed = ib_speed_enum_to_int(attr.active_speed);
+ width = ib_width_enum_to_int(attr.active_width);
+
+ if (speed < 0 || width < 0)
+ return -EINVAL;
+
+	/* All members of struct ethtool_link_settings other than those
+	 * set below are zero-initialized by __ethtool_get_link_ksettings().
+	 */
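+	/* The effective link speed is the per-lane speed times the lane
+	 * width, e.g. a 4X EDR link reports 4 * 25000 = 100000 Mb/s.
+	 */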
+ cmd->base.speed = speed * width;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ cmd->base.phy_address = 0xFF;
+
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
+static const struct ethtool_ops ipoib_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .get_link_ksettings = ipoib_get_link_ksettings,
+ .get_drvinfo = ipoib_get_drvinfo,
+ .get_coalesce = ipoib_get_coalesce,
+ .set_coalesce = ipoib_set_coalesce,
+ .get_strings = ipoib_get_strings,
+ .get_ethtool_stats = ipoib_get_ethtool_stats,
+ .get_sset_count = ipoib_get_sset_count,
+ .get_link = ethtool_op_get_link,
+};
+
+void ipoib_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &ipoib_ethtool_ops;
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
new file mode 100644
index 000000000..12ba7a0fe
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct file_operations;
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ipoib.h"
+
+static struct dentry *ipoib_root;
+
+static void format_gid(union ib_gid *gid, char *buf)
+{
+ int i, n;
+
+ for (n = 0, i = 0; i < 8; ++i) {
+ n += sprintf(buf + n, "%x",
+ be16_to_cpu(((__be16 *) gid->raw)[i]));
+ if (i < 7)
+ buf[n++] = ':';
+ }
+}
+
+static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
+{
+ struct ipoib_mcast_iter *iter;
+ loff_t n = *pos;
+
+ iter = ipoib_mcast_iter_init(file->private);
+ if (!iter)
+ return NULL;
+
+ while (n--) {
+ if (ipoib_mcast_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ }
+
+ return iter;
+}
+
+static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr,
+ loff_t *pos)
+{
+ struct ipoib_mcast_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (ipoib_mcast_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void ipoib_mcg_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+ /* nothing for now */
+}
+
+static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
+{
+ struct ipoib_mcast_iter *iter = iter_ptr;
+ char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
+ union ib_gid mgid;
+ unsigned long created;
+ unsigned int queuelen, complete, send_only;
+
+ if (!iter)
+ return 0;
+
+ ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
+ &complete, &send_only);
+
+ format_gid(&mgid, gid_buf);
+
+ seq_printf(file,
+ "GID: %s\n"
+ " created: %10ld\n"
+ " queuelen: %9d\n"
+ " complete: %9s\n"
+ " send_only: %8s\n"
+ "\n",
+ gid_buf, created, queuelen,
+ complete ? "yes" : "no",
+ send_only ? "yes" : "no");
+
+ return 0;
+}
+
+static const struct seq_operations ipoib_mcg_sops = {
+ .start = ipoib_mcg_seq_start,
+ .next = ipoib_mcg_seq_next,
+ .stop = ipoib_mcg_seq_stop,
+ .show = ipoib_mcg_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(ipoib_mcg);
+
+static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
+{
+ struct ipoib_path_iter *iter;
+ loff_t n = *pos;
+
+ iter = ipoib_path_iter_init(file->private);
+ if (!iter)
+ return NULL;
+
+ while (n--) {
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ }
+
+ return iter;
+}
+
+static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr,
+ loff_t *pos)
+{
+ struct ipoib_path_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+ /* nothing for now */
+}
+
+static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
+{
+ struct ipoib_path_iter *iter = iter_ptr;
+ char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
+ struct ipoib_path path;
+ int rate;
+
+ if (!iter)
+ return 0;
+
+ ipoib_path_iter_read(iter, &path);
+
+ format_gid(&path.pathrec.dgid, gid_buf);
+
+ seq_printf(file,
+ "GID: %s\n"
+ " complete: %6s\n",
+ gid_buf, sa_path_get_dlid(&path.pathrec) ? "yes" : "no");
+
+ if (sa_path_get_dlid(&path.pathrec)) {
+ rate = ib_rate_to_mbps(path.pathrec.rate);
+
+ seq_printf(file,
+ " DLID: 0x%04x\n"
+ " SL: %12d\n"
+ " rate: %8d.%d Gb/sec\n",
+ be32_to_cpu(sa_path_get_dlid(&path.pathrec)),
+ path.pathrec.sl,
+ rate / 1000, rate % 1000);
+ }
+
+ seq_putc(file, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations ipoib_path_sops = {
+ .start = ipoib_path_seq_start,
+ .next = ipoib_path_seq_next,
+ .stop = ipoib_path_seq_stop,
+ .show = ipoib_path_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(ipoib_path);
+
+void ipoib_create_debug_files(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ char name[IFNAMSIZ + sizeof("_path")];
+
+ snprintf(name, sizeof(name), "%s_mcg", dev->name);
+ priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ ipoib_root, dev, &ipoib_mcg_fops);
+
+ snprintf(name, sizeof(name), "%s_path", dev->name);
+ priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ ipoib_root, dev, &ipoib_path_fops);
+}
+
+void ipoib_delete_debug_files(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ debugfs_remove(priv->mcg_dentry);
+ debugfs_remove(priv->path_dentry);
+ priv->mcg_dentry = priv->path_dentry = NULL;
+}
+
+void ipoib_register_debugfs(void)
+{
+ ipoib_root = debugfs_create_dir("ipoib", NULL);
+}
+
+void ipoib_unregister_debugfs(void)
+{
+ debugfs_remove(ipoib_root);
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
new file mode 100644
index 000000000..ed25061fa
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -0,0 +1,1311 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <rdma/ib_cache.h>
+
+#include "ipoib.h"
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+static int data_debug_level;
+
+module_param(data_debug_level, int, 0644);
+MODULE_PARM_DESC(data_debug_level,
+ "Enable data path debug tracing if > 0");
+#endif
+
+struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
+ struct ib_pd *pd, struct rdma_ah_attr *attr)
+{
+ struct ipoib_ah *ah;
+ struct ib_ah *vah;
+
+ ah = kmalloc(sizeof(*ah), GFP_KERNEL);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->dev = dev;
+ ah->last_send = 0;
+ kref_init(&ah->ref);
+
+ vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
+ if (IS_ERR(vah)) {
+ kfree(ah);
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
+ ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
+ }
+
+ return ah;
+}
+
+void ipoib_free_ah(struct kref *kref)
+{
+ struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
+ struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add_tail(&ah->list, &priv->dead_ahs);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
+ u64 mapping[IPOIB_UD_RX_SG])
+{
+ ib_dma_unmap_single(priv->ca, mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
+}
+
+static int ipoib_ib_post_receive(struct net_device *dev, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+ priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+ priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
+
+
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
+ dev_kfree_skb_any(priv->rx_ring[id].skb);
+ priv->rx_ring[id].skb = NULL;
+ }
+
+ return ret;
+}
+
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sk_buff *skb;
+ int buf_size;
+ u64 *mapping;
+
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
+	/*
+	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
+	 * 64-byte aligned.
+	 */
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
+
+ mapping = priv->rx_ring[id].mapping;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
+ goto error;
+
+ priv->rx_ring[id].skb = skb;
+ return skb;
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static int ipoib_ib_post_receives(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ if (!ipoib_alloc_rx_skb(dev, i)) {
+ ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+ return -ENOMEM;
+ }
+ if (ipoib_ib_post_receive(dev, i)) {
+ ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_UD_RX_SG];
+ union ib_gid *dgid;
+ union ib_gid *sgid;
+
+ ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_recvq_size)) {
+ ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_recvq_size);
+ return;
+ }
+
+ skb = priv->rx_ring[wr_id].skb;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ ipoib_warn(priv,
+ "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+ dev_kfree_skb_any(skb);
+ priv->rx_ring[wr_id].skb = NULL;
+ return;
+ }
+
+ memcpy(mapping, priv->rx_ring[wr_id].mapping,
+ IPOIB_UD_RX_SG * sizeof(*mapping));
+
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
+
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
+
+ ipoib_ud_dma_unmap_rx(priv, mapping);
+
+ skb_put(skb, wc->byte_len);
+
+ /* First byte of dgid signals multicast when 0xff */
+ dgid = &((struct ib_grh *)skb->data)->dgid;
+
+ if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
+ skb->pkt_type = PACKET_HOST;
+ else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+
+ sgid = &((struct ib_grh *)skb->data)->sgid;
+
+ /*
+	 * Drop packets that this interface sent, i.e. multicast packets
+ * that the HCA has replicated.
+ */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
+ int need_repost = 1;
+
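+		/*
+		 * A matching SLID/QPN may also come from another interface
+		 * sharing the same LID; if the GRH source GID is not ours,
+		 * keep the packet.
+		 */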
+ if ((wc->wc_flags & IB_WC_GRH) &&
+ sgid->global.interface_id != priv->local_gid.global.interface_id)
+ need_repost = 0;
+
+ if (need_repost) {
+ dev_kfree_skb_any(skb);
+ goto repost;
+ }
+ }
+
+ skb_pull(skb, IB_GRH_BYTES);
+
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
+
+ skb->dev = dev;
+ if ((dev->features & NETIF_F_RXCSUM) &&
+ likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&priv->recv_napi, skb);
+
+repost:
+ if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+ ipoib_warn(priv, "ipoib_ib_post_receive failed "
+ "for buf %d\n", wr_id);
+}
+
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
+{
+ struct sk_buff *skb = tx_req->skb;
+ u64 *mapping = tx_req->mapping;
+ int i;
+ int off;
+
+ if (skb_headlen(skb)) {
+ mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+ return -EIO;
+
+ off = 1;
+	} else {
+		off = 0;
+	}
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping[i + off] = ib_dma_map_page(ca,
+ skb_frag_page(frag),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
+ goto partial_error;
+ }
+ return 0;
+
+partial_error:
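+	/*
+	 * Unwind in reverse: when a head fragment was mapped (off == 1),
+	 * page frag i - 1 lives in mapping[i], otherwise in mapping[i - 1];
+	 * both cases collapse to mapping[i - !off].
+	 */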
+ for (; i > 0; --i) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ if (off)
+ ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+ return -EIO;
+}
+
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ struct sk_buff *skb = tx_req->skb;
+ u64 *mapping = tx_req->mapping;
+ int i;
+ int off;
+
+ if (skb_headlen(skb)) {
+ ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+ DMA_TO_DEVICE);
+ off = 1;
+	} else {
+		off = 0;
+	}
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ib_dma_unmap_page(priv->ca, mapping[i + off],
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+}
+
+/*
+ * As a result of a completion error the QP can transition to the SQE state.
+ * This function checks whether the (send) QP is in the SQE state and, if so,
+ * moves it back to RTS so that it is functional again.
+ */
+static void ipoib_qp_state_validate_work(struct work_struct *work)
+{
+ struct ipoib_qp_state_validate *qp_work =
+ container_of(work, struct ipoib_qp_state_validate, work);
+
+ struct ipoib_dev_priv *priv = qp_work->priv;
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
+ __func__, ret);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x is in state: %d\n",
+ __func__, priv->qp->qp_num, qp_attr.qp_state);
+
+	/* Currently only the SQE->RTS transition is supported */
+ if (qp_attr.qp_state == IB_QPS_SQE) {
+ qp_attr.qp_state = IB_QPS_RTS;
+
+ ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
+ if (ret) {
+ pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
+ ret, priv->qp->qp_num);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
+ __func__, priv->qp->qp_num);
+ } else {
+ pr_warn("QP (%d) will stay in state: %d\n",
+ priv->qp->qp_num, qp_attr.qp_state);
+ }
+
+free_res:
+ kfree(qp_work);
+}
+
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ unsigned int wr_id = wc->wr_id;
+ struct ipoib_tx_buf *tx_req;
+
+ ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_sendq_size)) {
+ ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_sendq_size);
+ return;
+ }
+
+ tx_req = &priv->tx_ring[wr_id];
+
+ ipoib_dma_unmap_tx(priv, tx_req);
+
+ ++dev->stats.tx_packets;
+ dev->stats.tx_bytes += tx_req->skb->len;
+
+ dev_kfree_skb_any(tx_req->skb);
+
+ ++priv->tx_tail;
+ ++priv->global_tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
+ netif_wake_queue(dev);
+
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct ipoib_qp_state_validate *qp_work;
+ ipoib_warn(priv,
+ "failed send event (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
+ if (!qp_work)
+ return;
+
+ INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
+ qp_work->priv = priv;
+ queue_work(priv->wq, &qp_work->work);
+ }
+}
+
+static int poll_tx(struct ipoib_dev_priv *priv)
+{
+ int n, i;
+ struct ib_wc *wc;
+
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+ for (i = 0; i < n; ++i) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
+ else
+ ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+ }
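+	/* A full batch means the CQ may still hold more completions. */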
+ return n == MAX_SEND_CQE;
+}
+
+int ipoib_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(napi, struct ipoib_dev_priv, recv_napi);
+ struct net_device *dev = priv->dev;
+ int done;
+ int t;
+ int n, i;
+
+ done = 0;
+
+poll_more:
+ while (done < budget) {
+ int max = (budget - done);
+
+ t = min(IPOIB_NUM_WC, max);
+ n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
+
+ for (i = 0; i < n; i++) {
+ struct ib_wc *wc = priv->ibwc + i;
+
+ if (wc->wr_id & IPOIB_OP_RECV) {
+ ++done;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_rx_wc(dev, wc);
+ else
+ ipoib_ib_handle_rx_wc(dev, wc);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
+ }
+
+ if (n != t)
+ break;
+ }
+
+ if (done < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->recv_cq,
+ IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_reschedule(napi))
+ goto poll_more;
+ }
+
+ return done;
+}
+
+int ipoib_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
+ send_napi);
+ struct net_device *dev = priv->dev;
+ int n, i;
+ struct ib_wc *wc;
+
+poll_more:
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+
+ for (i = 0; i < n; i++) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(dev, wc);
+ else
+ ipoib_ib_handle_tx_wc(dev, wc);
+ }
+
+ if (n < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_reschedule(napi))
+ goto poll_more;
+ }
+ return n < 0 ? 0 : n;
+}
+
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
+{
+ struct ipoib_dev_priv *priv = ctx_ptr;
+
+ napi_schedule(&priv->recv_napi);
+}
+
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
+{
+ struct ipoib_dev_priv *priv = ctx_ptr;
+
+ napi_schedule(&priv->send_napi);
+}
+
+static inline int post_send(struct ipoib_dev_priv *priv,
+ unsigned int wr_id,
+ struct ib_ah *address, u32 dqpn,
+ struct ipoib_tx_buf *tx_req,
+ void *head, int hlen)
+{
+ struct sk_buff *skb = tx_req->skb;
+
+ ipoib_build_sge(priv, tx_req);
+
+ priv->tx_wr.wr.wr_id = wr_id;
+ priv->tx_wr.remote_qpn = dqpn;
+ priv->tx_wr.ah = address;
+
+ if (head) {
+ priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
+ priv->tx_wr.header = head;
+ priv->tx_wr.hlen = hlen;
+ priv->tx_wr.wr.opcode = IB_WR_LSO;
+	} else {
+		priv->tx_wr.wr.opcode = IB_WR_SEND;
+	}
+
+ return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
+}
+
+int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_tx_buf *tx_req;
+ int hlen, rc;
+ void *phead;
+ unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
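+	/*
+	 * One scatter/gather entry is consumed by the linear head (when
+	 * present), leaving usable_sge entries for the page fragments.
+	 */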
+
+ if (skb_is_gso(skb)) {
+ hlen = skb_tcp_all_headers(skb);
+ phead = skb->data;
+ if (unlikely(!skb_pull(skb, hlen))) {
+ ipoib_warn(priv, "linear data too small\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ } else {
+ if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+ ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+ return -1;
+ }
+ phead = NULL;
+ hlen = 0;
+ }
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ }
+
+ ipoib_dbg_data(priv,
+ "sending packet, length=%d address=%p dqpn=0x%06x\n",
+ skb->len, address, dqpn);
+
+ /*
+ * We put the skb into the tx_ring _before_ we call post_send()
+ * because it's entirely possible that the completion handler will
+ * run before we execute anything after the post_send(). That
+ * means we have to make sure everything is properly recorded and
+ * our state is consistent before we call post_send().
+ */
+ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
+ tx_req->skb = skb;
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
+ else
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+	/* tx_head is advanced only after a successful post, but check the
+	 * head/tail distance here to decide whether the queue must stop.
+	 */
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
+ netif_stop_queue(dev);
+ }
+
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+
+ rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
+ address, dqpn, tx_req, phead, hlen);
+ if (unlikely(rc)) {
+ ipoib_warn(priv, "post_send failed, error %d\n", rc);
+ ++dev->stats.tx_errors;
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ rc = 0;
+ } else {
+ netif_trans_update(dev);
+
+ rc = priv->tx_head;
+ ++priv->tx_head;
+ ++priv->global_tx_head;
+ }
+ return rc;
+}
+
+static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_ah *ah, *tah;
+ unsigned long flags;
+
+ netif_tx_lock_bh(priv->dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
+ if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
+ list_del(&ah->list);
+ rdma_destroy_ah(ah->ah, 0);
+ kfree(ah);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(priv->dev);
+}
+
+void ipoib_reap_ah(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+
+ ipoib_reap_dead_ahs(priv);
+
+ if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
+}
+
+static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
+{
+ clear_bit(IPOIB_STOP_REAPER, &priv->flags);
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
+}
+
+static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
+{
+ set_bit(IPOIB_STOP_REAPER, &priv->flags);
+ cancel_delayed_work(&priv->ah_reap_task);
+ /*
+ * After ipoib_stop_ah_reaper() we always go through
+ * ipoib_reap_dead_ahs() which ensures the work is really stopped and
+ * does a final flush out of the dead_ah's list
+ */
+}
+
+static int recvs_pending(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int pending = 0;
+ int i;
+
+ for (i = 0; i < ipoib_recvq_size; ++i)
+ if (priv->rx_ring[i].skb)
+ ++pending;
+
+ return pending;
+}
+
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+ struct ib_qp *qp,
+ enum ib_qp_state new_state)
+{
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+ return;
+ }
+	/* Print according to the new state and the previous state. */
+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+ else
+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+ new_state, qp_attr.qp_state);
+}
+
+static void ipoib_napi_enable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_enable(&priv->recv_napi);
+ napi_enable(&priv->send_napi);
+}
+
+static void ipoib_napi_disable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_disable(&priv->recv_napi);
+ napi_disable(&priv->send_napi);
+}
+
+int ipoib_ib_dev_stop_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ unsigned long begin;
+ struct ipoib_tx_buf *tx_req;
+ int i;
+
+ if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_napi_disable(dev);
+
+ ipoib_cm_dev_stop(dev);
+
+	/*
+	 * Move our QP to the error state and then reinitialize it
+	 * once all work requests have completed or have been flushed.
+	 */
+ qp_attr.qp_state = IB_QPS_ERR;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
+
+ /* Wait for all sends and receives to complete */
+ begin = jiffies;
+
+ while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv,
+ "timing out; %d sends %d receives not completed\n",
+ priv->tx_head - priv->tx_tail,
+ recvs_pending(dev));
+
+ /*
+ * assume the HW is wedged and just free up
+ * all our pending work requests.
+ */
+ while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
+ tx_req = &priv->tx_ring[priv->tx_tail &
+ (ipoib_sendq_size - 1)];
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(tx_req->skb);
+ ++priv->tx_tail;
+ ++priv->global_tx_tail;
+ }
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ struct ipoib_rx_buf *rx_req;
+
+ rx_req = &priv->rx_ring[i];
+ if (!rx_req->skb)
+ continue;
+ ipoib_ud_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
+ }
+
+ goto timeout;
+ }
+
+ ipoib_drain_cq(dev);
+
+ usleep_range(1000, 2000);
+ }
+
+ ipoib_dbg(priv, "All sends and receives done.\n");
+
+timeout:
+ qp_attr.qp_state = IB_QPS_RESET;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ ipoib_warn(priv, "Failed to modify QP to RESET state\n");
+
+ ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
+
+ return 0;
+}
+
+int ipoib_ib_dev_open_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ ret = ipoib_init_qp(dev);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
+ return -1;
+ }
+
+ ret = ipoib_ib_post_receives(dev);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
+ goto out;
+ }
+
+ ret = ipoib_cm_dev_open(dev);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
+ goto out;
+ }
+
+ if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_napi_enable(dev);
+
+ return 0;
+out:
+ return -1;
+}
+
+int ipoib_ib_dev_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_pkey_dev_check_presence(dev);
+
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+ ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
+ (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
+ return -1;
+ }
+
+ ipoib_start_ah_reaper(priv);
+ if (priv->rn_ops->ndo_open(dev)) {
+ pr_warn("%s: Failed to open dev\n", dev->name);
+ goto dev_stop;
+ }
+
+ set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
+ return 0;
+
+dev_stop:
+ ipoib_stop_ah_reaper(priv);
+ return -1;
+}
+
+void ipoib_ib_dev_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ priv->rn_ops->ndo_stop(dev);
+
+ clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ ipoib_stop_ah_reaper(priv);
+}
+
+void ipoib_pkey_dev_check_presence(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (!(priv->pkey & 0x7fff) ||
+ ib_find_pkey(priv->ca, priv->port, priv->pkey,
+ &priv->pkey_index)) {
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ } else {
+ if (rn->set_id)
+ rn->set_id(dev, priv->pkey_index);
+ set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ }
+}
+
+void ipoib_ib_dev_up(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_pkey_dev_check_presence(dev);
+
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+ ipoib_dbg(priv, "PKEY is not assigned.\n");
+ return;
+ }
+
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+
+ ipoib_mcast_start_thread(dev);
+}
+
+void ipoib_ib_dev_down(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "downing ib_dev\n");
+
+ clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ netif_carrier_off(dev);
+
+ ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_dev_flush(dev);
+
+ ipoib_flush_paths(dev);
+}
+
+void ipoib_drain_cq(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i, n;
+
+ /*
+ * We call completion handling routines that expect to be
+ * called from the BH-disabled NAPI poll context, so disable
+ * BHs here too.
+ */
+ local_bh_disable();
+
+ do {
+ n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
+ for (i = 0; i < n; ++i) {
+ /*
+ * Convert any successful completions to flush
+ * errors to avoid passing packets up the
+ * stack after bringing the device down.
+ */
+ if (priv->ibwc[i].status == IB_WC_SUCCESS)
+ priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
+
+ if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
+ if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+ else
+ ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
+ }
+ } while (n == IPOIB_NUM_WC);
+
+ while (poll_tx(priv))
+ ; /* nothing */
+
+ local_bh_enable();
+}
+
+/*
+ * Takes whatever value is in pkey index 0 and updates priv->pkey;
+ * returns 0 if the pkey value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+ int result;
+ u16 prev_pkey;
+
+ prev_pkey = priv->pkey;
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+ priv->port, result);
+ return result;
+ }
+
+ priv->pkey |= 0x8000;
+
+ if (prev_pkey != priv->pkey) {
+ ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+ prev_pkey, priv->pkey);
+ /*
+ * Update the pkey in the broadcast address, while making sure to set
+ * the full membership bit, so that we join the right broadcast group.
+ */
+ priv->dev->broadcast[8] = priv->pkey >> 8;
+ priv->dev->broadcast[9] = priv->pkey & 0xff;
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Returns 0 if the pkey value was found at a different index (or is no
+ * longer present), 1 if the index is unchanged.
+ */
+static inline int update_child_pkey(struct ipoib_dev_priv *priv)
+{
+ u16 old_index = priv->pkey_index;
+
+ priv->pkey_index = 0;
+ ipoib_pkey_dev_check_presence(priv->dev);
+
+ if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+ (old_index == priv->pkey_index))
+ return 1;
+ return 0;
+}
+
+/*
+ * Returns true if the device address of the ipoib interface has changed and
+ * the new address is a valid one (i.e. present in the gid table); returns
+ * false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
+{
+ union ib_gid search_gid;
+ union ib_gid gid0;
+ int err;
+ u16 index;
+ u32 port;
+ bool ret = false;
+
+ if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
+ return false;
+
+ netif_addr_lock_bh(priv->dev);
+
+	/* The subnet prefix may have changed; update it now so we won't have
+	 * to do it later.
+ */
+ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
+ sizeof(gid0.global.subnet_prefix));
+ search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+ search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+ netif_addr_unlock_bh(priv->dev);
+
+ err = ib_find_gid(priv->ca, &search_gid, &port, &index);
+
+ netif_addr_lock_bh(priv->dev);
+
+ if (search_gid.global.interface_id !=
+ priv->local_gid.global.interface_id)
+ /* There was a change while we were looking up the gid, bail
+ * here and let the next work sort this out
+ */
+ goto out;
+
+	/* The next section of code needs some background:
+	 * Per the IB spec the port GUID can't change if the HCA is powered on.
+	 * The port GUID is the basis for the GID at index 0, which in turn is
+	 * the basis for the default device address of an ipoib interface.
+	 *
+	 * So it seems the flow should be:
+	 * if user_changed_dev_addr && gid in gid tbl
+	 *	set bit dev_addr_set
+	 *	return true
+	 * else
+	 *	return false
+	 *
+	 * The issue is that there are devices that don't follow the spec,
+	 * they change the port GUID when the HCA is powered on, so in order
+	 * not to break userspace applications, we need to check if the
+	 * user wanted to control the device address.  We assume that if the
+	 * user sets the device address back to be based on GID index 0, they
+	 * no longer wish to control it.
+	 *
+	 * If the user doesn't control the device address,
+	 * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means
+	 * the port GUID has changed and the GID at index 0 has changed,
+	 * so we need to change priv->local_gid and priv->dev->dev_addr
+	 * to reflect the new GID.
+	 */
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ if (!err && port == priv->port) {
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+ if (index == 0)
+ clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+ &priv->flags);
+ else
+ set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+ ret = true;
+ } else {
+ ret = false;
+ }
+ } else {
+ if (!err && port == priv->port) {
+ ret = true;
+ } else {
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+ memcpy(&priv->local_gid, &gid0,
+ sizeof(priv->local_gid));
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
+ sizeof(priv->local_gid));
+ ret = true;
+ }
+ }
+ }
+
+out:
+ netif_addr_unlock_bh(priv->dev);
+
+ return ret;
+}
+
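+/*
+ * Core of the three flush work handlers below.  Roughly: LIGHT revalidates
+ * paths, multicast state and the device address; NORMAL additionally brings
+ * the IB side of the device down; HEAVY restarts it completely (typically
+ * after a P_Key change).  Child interfaces are flushed first, recursively,
+ * under vlan_rwsem.
+ */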
+static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level,
+ int nesting)
+{
+ struct ipoib_dev_priv *cpriv;
+ struct net_device *dev = priv->dev;
+ int result;
+
+ down_read_nested(&priv->vlan_rwsem, nesting);
+
+ /*
+ * Flush any child interfaces too -- they might be up even if
+ * the parent is down.
+ */
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
+
+ up_read(&priv->vlan_rwsem);
+
+ if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
+ level != IPOIB_FLUSH_HEAVY) {
+ /* Make sure the dev_addr is set even if not flushing */
+ if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
+ ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
+ return;
+ }
+
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+ /* interface is down. update pkey and leave. */
+ if (level == IPOIB_FLUSH_HEAVY) {
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ update_parent_pkey(priv);
+ else
+ update_child_pkey(priv);
+ } else if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
+ ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
+ return;
+ }
+
+ if (level == IPOIB_FLUSH_HEAVY) {
+		/* Child devices chase their origin pkey value, while non-child
+		 * (parent) devices should always take whatever is present at
+		 * pkey index 0.
+		 */
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ result = update_child_pkey(priv);
+ if (result) {
+ /* restart QP only if P_Key index is changed */
+ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
+ return;
+ }
+
+ } else {
+ result = update_parent_pkey(priv);
+ /* restart QP only if P_Key value changed */
+ if (result) {
+ ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+ return;
+ }
+ }
+ }
+
+ if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
+ ipoib_mark_paths_invalid(dev);
+		/* Mark IPoIB operation as down to prevent races between
+		 * the flush flow, which leaves the MCG, and on-the-fly joins
+		 * that can happen during that time.  The mcast restart task
+		 * should deal with join requests we missed.
+		 */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_reap_dead_ahs(priv);
+ }
+
+ if (level >= IPOIB_FLUSH_NORMAL)
+ ipoib_ib_dev_down(dev);
+
+ if (level == IPOIB_FLUSH_HEAVY) {
+ if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_ib_dev_stop(dev);
+
+ if (ipoib_ib_dev_open(dev))
+ return;
+
+ if (netif_queue_stopped(dev))
+ netif_start_queue(dev);
+ }
+
+ /*
+ * The device could have been brought down between the start and when
+	 * we get here; don't bring it back up if it's not configured up.
+ */
+ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+ if (level >= IPOIB_FLUSH_NORMAL)
+ ipoib_ib_dev_up(dev);
+ if (ipoib_dev_addr_changed_valid(priv))
+ ipoib_mcast_restart_task(&priv->restart_task);
+ }
+}
+
+void ipoib_ib_dev_flush_light(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, flush_light);
+
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
+}
+
+void ipoib_ib_dev_flush_normal(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, flush_normal);
+
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
+}
+
+void ipoib_ib_dev_flush_heavy(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, flush_heavy);
+
+ rtnl_lock();
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ rtnl_unlock();
+}
+
+void ipoib_ib_dev_cleanup(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "cleaning up ib_dev\n");
+ /*
+ * We must make sure there are no more (path) completions
+ * that may wish to touch priv fields that are no longer valid
+ */
+ ipoib_flush_paths(dev);
+
+ ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_dev_flush(dev);
+
+ /*
+	 * Our ah references aren't all freed until after
+	 * ipoib_mcast_dev_flush(), ipoib_flush_paths() and
+	 * the neighbor garbage collection have been stopped and reaped.
+ * That should all be done now, so make a final ah flush.
+ */
+ ipoib_reap_dead_ahs(priv);
+
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+ priv->rn_ops->ndo_uninit(dev);
+
+ if (priv->pd) {
+ ib_dealloc_pd(priv->pd);
+ priv->pd = NULL;
+ }
+}
+
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
new file mode 100644
index 000000000..f10d4bcf8
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -0,0 +1,2698 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipoib.h"
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
+
+#include <linux/ip.h>
+#include <linux/in.h>
+
+#include <linux/jhash.h>
+#include <net/arp.h>
+#include <net/addrconf.h>
+#include <linux/inetdevice.h>
+#include <rdma/ib_cache.h>
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
+int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
+
+module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
+module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+int ipoib_debug_level;
+
+module_param_named(debug_level, ipoib_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+#endif
+
+struct ipoib_path_iter {
+ struct net_device *dev;
+ struct ipoib_path path;
+};
+
+static const u8 ipv4_bcast_addr[] = {
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+
+struct workqueue_struct *ipoib_workqueue;
+
+struct ib_sa_client ipoib_sa_client;
+
+static int ipoib_add_one(struct ib_device *device);
+static void ipoib_remove_one(struct ib_device *device, void *client_data);
+static void ipoib_neigh_reclaim(struct rcu_head *rp);
+static struct net_device *ipoib_get_net_dev_by_params(
+ struct ib_device *dev, u32 port, u16 pkey,
+ const union ib_gid *gid, const struct sockaddr *addr,
+ void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
+
+static struct ib_client ipoib_client = {
+ .name = "ipoib",
+ .add = ipoib_add_one,
+ .remove = ipoib_remove_one,
+ .get_net_dev_by_params = ipoib_get_net_dev_by_params,
+};
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_info *ni = ptr;
+ struct net_device *dev = ni->dev;
+
+ if (dev->netdev_ops->ndo_open != ipoib_open)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_CHANGENAME:
+ ipoib_delete_debug_files(dev);
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_UNREGISTER:
+ ipoib_delete_debug_files(dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
+int ipoib_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "bringing up interface\n");
+
+ netif_carrier_off(dev);
+
+ set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+ if (ipoib_ib_dev_open(dev)) {
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
+ return 0;
+ goto err_disable;
+ }
+
+ ipoib_ib_dev_up(dev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ /* Bring up any child interfaces too */
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(cpriv, &priv->child_intfs, list) {
+ int flags;
+
+ flags = cpriv->dev->flags;
+ if (flags & IFF_UP)
+ continue;
+
+ dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
+ }
+ up_read(&priv->vlan_rwsem);
+ } else if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
+			ipoib_dbg(priv, "parent device %s is not up, so child device may not be functioning.\n",
+ ppriv->dev->name);
+ }
+ netif_start_queue(dev);
+
+ return 0;
+
+err_disable:
+ clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+ return -EINVAL;
+}
+
+static int ipoib_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "stopping interface\n");
+
+ clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+ netif_stop_queue(dev);
+
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ /* Bring down any child interfaces too */
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(cpriv, &priv->child_intfs, list) {
+ int flags;
+
+ flags = cpriv->dev->flags;
+ if (!(flags & IFF_UP))
+ continue;
+
+ dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
+ }
+ up_read(&priv->vlan_rwsem);
+ }
+
+ return 0;
+}
+
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+ return features;
+}
+
+static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = 0;
+
+ /* dev->mtu > 2K ==> connected mode */
+ if (ipoib_cm_admin_enabled(dev)) {
+ if (new_mtu > ipoib_cm_max_mtu(dev))
+ return -EINVAL;
+
+ if (new_mtu > priv->mcast_mtu)
+ ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
+ priv->mcast_mtu);
+
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
+ new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
+ return -EINVAL;
+
+ priv->admin_mtu = new_mtu;
+
+ if (priv->mcast_mtu < priv->admin_mtu)
+ ipoib_dbg(priv, "MTU must be smaller than the underlying "
+ "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
+
+ new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
+
+ if (priv->rn_ops->ndo_change_mtu) {
+ bool carrier_status = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+
+ /* notify lower level on the real mtu */
+ ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+ if (carrier_status)
+ netif_carrier_on(dev);
+ } else {
+ dev->mtu = new_mtu;
+ }
+
+ return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->rn_ops->ndo_get_stats64)
+ priv->rn_ops->ndo_get_stats64(dev, stats);
+ else
+ netdev_stats_to_stats64(stats, &dev->stats);
+}
+
+/* Called with an RCU read lock taken */
+static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
+ struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+ struct in_device *in_dev;
+ struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
+ __be32 ret_addr;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ in_dev = in_dev_get(dev);
+ if (!in_dev)
+ return false;
+
+ ret_addr = inet_confirm_addr(net, in_dev, 0,
+ addr_in->sin_addr.s_addr,
+ RT_SCOPE_HOST);
+ in_dev_put(in_dev);
+ if (ret_addr)
+ return true;
+
+ break;
+ case AF_INET6:
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
+ return true;
+
+ break;
+ }
+ return false;
+}
+
+/*
+ * Find the master net_device on top of the given net_device.
+ * @dev: base IPoIB net_device
+ *
+ * Returns the master net_device with a reference held, or the same net_device
+ * if no master exists.
+ */
+static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
+{
+ struct net_device *master;
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master)
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (master)
+ return master;
+
+ dev_hold(dev);
+ return dev;
+}
+
+struct ipoib_walk_data {
+ const struct sockaddr *addr;
+ struct net_device *result;
+};
+
+static int ipoib_upper_walk(struct net_device *upper,
+ struct netdev_nested_priv *priv)
+{
+ struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
+ int ret = 0;
+
+ if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
+ dev_hold(upper);
+ data->result = upper;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * ipoib_get_net_dev_match_addr - Find a net_device matching
+ * the given address, which is an upper device of the given net_device.
+ *
+ * @addr: IP address to look for.
+ * @dev: base IPoIB net_device
+ *
+ * If found, returns the net_device with a reference held. Otherwise return
+ * NULL.
+ */
+static struct net_device *ipoib_get_net_dev_match_addr(
+ const struct sockaddr *addr, struct net_device *dev)
+{
+ struct netdev_nested_priv priv;
+ struct ipoib_walk_data data = {
+ .addr = addr,
+ };
+
+ priv.data = (void *)&data;
+ rcu_read_lock();
+ if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
+ dev_hold(dev);
+ data.result = dev;
+ goto out;
+ }
+
+ netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
+out:
+ rcu_read_unlock();
+ return data.result;
+}
+
+/* Returns the number of IPoIB netdevs on top of a given ipoib device
+ * matching a pkey_index and address, if one exists.
+ *
+ * @found_net_dev: contains a matching net_device if the return value >= 1,
+ * with a reference held. */
+static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
+ const union ib_gid *gid,
+ u16 pkey_index,
+ const struct sockaddr *addr,
+ int nesting,
+ struct net_device **found_net_dev)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *net_dev = NULL;
+ int matches = 0;
+
+ if (priv->pkey_index == pkey_index &&
+ (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
+ if (!addr) {
+ net_dev = ipoib_get_master_net_dev(priv->dev);
+ } else {
+ /* Verify the net_device matches the IP address, as
+ * IPoIB child devices currently share a GID. */
+ net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
+ }
+ if (net_dev) {
+ if (!*found_net_dev)
+ *found_net_dev = net_dev;
+ else
+ dev_put(net_dev);
+ ++matches;
+ }
+ }
+
+ /* Check child interfaces */
+ down_read_nested(&priv->vlan_rwsem, nesting);
+ list_for_each_entry(child_priv, &priv->child_intfs, list) {
+ matches += ipoib_match_gid_pkey_addr(child_priv, gid,
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
+ if (matches > 1)
+ break;
+ }
+ up_read(&priv->vlan_rwsem);
+
+ return matches;
+}
+
+/* Returns the number of matching net_devs found (between 0 and 2). Also
+ * return the matching net_device in the @net_dev parameter, holding a
+ * reference to the net_device, if the number of matches >= 1 */
+static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
+ u16 pkey_index,
+ const union ib_gid *gid,
+ const struct sockaddr *addr,
+ struct net_device **net_dev)
+{
+ struct ipoib_dev_priv *priv;
+ int matches = 0;
+
+ *net_dev = NULL;
+
+ list_for_each_entry(priv, dev_list, list) {
+ if (priv->port != port)
+ continue;
+
+ matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
+ addr, 0, net_dev);
+ if (matches > 1)
+ break;
+ }
+
+ return matches;
+}
+
+static struct net_device *ipoib_get_net_dev_by_params(
+ struct ib_device *dev, u32 port, u16 pkey,
+ const union ib_gid *gid, const struct sockaddr *addr,
+ void *client_data)
+{
+ struct net_device *net_dev;
+ struct list_head *dev_list = client_data;
+ u16 pkey_index;
+ int matches;
+ int ret;
+
+ if (!rdma_protocol_ib(dev, port))
+ return NULL;
+
+ ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
+ if (ret)
+ return NULL;
+
+ /* See if we can find a unique device matching the L2 parameters */
+ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
+ gid, NULL, &net_dev);
+
+ switch (matches) {
+ case 0:
+ return NULL;
+ case 1:
+ return net_dev;
+ }
+
+ dev_put(net_dev);
+
+ /* Couldn't find a unique device with L2 parameters only. Use L3
+ * address to uniquely match the net device */
+ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
+ gid, addr, &net_dev);
+ switch (matches) {
+ case 0:
+ return NULL;
+ default:
+ dev_warn_ratelimited(&dev->dev,
+ "duplicate IP address detected\n");
+ fallthrough;
+ case 1:
+ return net_dev;
+ }
+}
+
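+/*
+ * Switch between datagram and connected mode (the caller holds rtnl; note
+ * the unlock/trylock dance below).  The lock is dropped around
+ * ipoib_flush_paths(), which sleeps waiting for path queries to complete,
+ * and -EBUSY is returned if it cannot be re-taken so the write can be
+ * retried.
+ */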
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "connected\n")) ||
+ (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "datagram\n"))) {
+ return 0;
+ }
+
+ /* flush paths if we switch modes so that connections are restarted */
+ if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+ set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ ipoib_warn(priv, "enabling connected mode "
+ "will cause multicast packet drops\n");
+ netdev_update_features(dev);
+ dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
+ rtnl_unlock();
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+ ipoib_flush_paths(dev);
+ return (!rtnl_trylock()) ? -EBUSY : 0;
+ }
+
+ if (!strcmp(buf, "datagram\n")) {
+ clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_update_features(dev);
+ dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
+ rtnl_unlock();
+ ipoib_flush_paths(dev);
+ return (!rtnl_trylock()) ? -EBUSY : 0;
+ }
+
+ return -EINVAL;
+}
+
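+/*
+ * Paths live in an rb-tree keyed by the 16-byte destination GID
+ * (pathrec.dgid.raw), ordered by memcmp(); callers take priv->lock around
+ * lookups and insertions.
+ */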
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node *n = priv->path_tree.rb_node;
+ struct ipoib_path *path;
+ int ret;
+
+ while (n) {
+ path = rb_entry(n, struct ipoib_path, rb_node);
+
+ ret = memcmp(gid, path->pathrec.dgid.raw,
+ sizeof (union ib_gid));
+
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ return path;
+ }
+
+ return NULL;
+}
+
+static int __path_add(struct net_device *dev, struct ipoib_path *path)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node **n = &priv->path_tree.rb_node;
+ struct rb_node *pn = NULL;
+ struct ipoib_path *tpath;
+ int ret;
+
+ while (*n) {
+ pn = *n;
+ tpath = rb_entry(pn, struct ipoib_path, rb_node);
+
+ ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
+ sizeof (union ib_gid));
+ if (ret < 0)
+ n = &pn->rb_left;
+ else if (ret > 0)
+ n = &pn->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&path->rb_node, pn, n);
+ rb_insert_color(&path->rb_node, &priv->path_tree);
+
+ list_add_tail(&path->list, &priv->path_list);
+
+ return 0;
+}
+
+static void path_free(struct net_device *dev, struct ipoib_path *path)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&path->queue)))
+ dev_kfree_skb_irq(skb);
+
+ ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
+
+ /* remove all neigh connected to this path */
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
+ if (path->ah)
+ ipoib_put_ah(path->ah);
+
+ kfree(path);
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
+{
+ struct ipoib_path_iter *iter;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
+ iter->dev = dev;
+ memset(iter->path.pathrec.dgid.raw, 0, 16);
+
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+int ipoib_path_iter_next(struct ipoib_path_iter *iter)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
+ struct rb_node *n;
+ struct ipoib_path *path;
+ int ret = 1;
+
+ spin_lock_irq(&priv->lock);
+
+ n = rb_first(&priv->path_tree);
+
+ while (n) {
+ path = rb_entry(n, struct ipoib_path, rb_node);
+
+ if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
+ sizeof (union ib_gid)) < 0) {
+ iter->path = *path;
+ ret = 0;
+ break;
+ }
+
+ n = rb_next(n);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+ struct ipoib_path *path)
+{
+ *path = iter->path;
+}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+
+void ipoib_mark_paths_invalid(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path, *tp;
+
+ spin_lock_irq(&priv->lock);
+
+ list_for_each_entry_safe(path, tp, &priv->path_list, list) {
+ ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
+ path->pathrec.dgid.raw);
+ if (path->ah)
+ path->ah->valid = 0;
+ }
+
+ spin_unlock_irq(&priv->lock);
+}
+
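+/*
+ * Prepend the 20-byte destination hardware address in front of the IPoIB
+ * header so the xmit path can later figure out where to send the skb; the
+ * pseudo header is stripped again in ipoib_start_xmit().
+ */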
+static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
+{
+ struct ipoib_pseudo_header *phdr;
+
+ phdr = skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+}
+
+void ipoib_flush_paths(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path, *tp;
+ LIST_HEAD(remove_list);
+ unsigned long flags;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_splice_init(&priv->path_list, &remove_list);
+
+ list_for_each_entry(path, &remove_list, list)
+ rb_erase(&path->rb_node, &priv->path_tree);
+
+ list_for_each_entry_safe(path, tp, &remove_list, list) {
+ if (path->query)
+ ib_sa_cancel_query(path->query_id, path->query);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ wait_for_completion(&path->done);
+ path_free(dev, path);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
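+/*
+ * SA path record query callback.  On success a fresh AH is created and
+ * handed to every neighbour waiting on this path, and all queued skbs
+ * (from both the path and its neighbours) are retransmitted via
+ * dev_queue_xmit().
+ */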
+static void path_rec_completion(int status,
+ struct sa_path_rec *pathrec,
+ int num_prs, void *path_ptr)
+{
+ struct ipoib_path *path = path_ptr;
+ struct net_device *dev = path->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_ah *ah = NULL;
+ struct ipoib_ah *old_ah = NULL;
+ struct ipoib_neigh *neigh, *tn;
+ struct sk_buff_head skqueue;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (!status)
+ ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
+ be32_to_cpu(sa_path_get_dlid(pathrec)),
+ pathrec->dgid.raw);
+ else
+ ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
+ status, path->pathrec.dgid.raw);
+
+ skb_queue_head_init(&skqueue);
+
+ if (!status) {
+ struct rdma_ah_attr av;
+
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av, NULL)) {
+ ah = ipoib_create_ah(dev, priv->pd, &av);
+ rdma_destroy_ah_attr(&av);
+ }
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR,
+ * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
+ path->pathrec = *pathrec;
+
+ old_ah = path->ah;
+ path->ah = ah;
+
+ ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
+ ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
+ pathrec->sl);
+
+ while ((skb = __skb_dequeue(&path->queue)))
+ __skb_queue_tail(&skqueue, skb);
+
+ list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
+ if (neigh->ah) {
+ WARN_ON(neigh->ah != old_ah);
+ /*
+ * Dropping the ah reference inside
+ * priv->lock is safe here, because we
+ * will hold one more reference from
+ * the original value of path->ah (ie
+ * old_ah).
+ */
+ ipoib_put_ah(neigh->ah);
+ }
+ kref_get(&path->ah->ref);
+ neigh->ah = path->ah;
+
+ if (ipoib_cm_enabled(dev, neigh->daddr)) {
+ if (!ipoib_cm_get(neigh))
+ ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
+ path,
+ neigh));
+ if (!ipoib_cm_get(neigh)) {
+ ipoib_neigh_free(neigh);
+ continue;
+ }
+ }
+
+ while ((skb = __skb_dequeue(&neigh->queue)))
+ __skb_queue_tail(&skqueue, skb);
+ }
+ path->ah->valid = 1;
+ }
+
+ path->query = NULL;
+ complete(&path->done);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (IS_ERR_OR_NULL(ah))
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
+ if (old_ah)
+ ipoib_put_ah(old_ah);
+
+ while ((skb = __skb_dequeue(&skqueue))) {
+ int ret;
+ skb->dev = dev;
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
+ }
+}
+
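+/*
+ * Fill in the constant part of a path record query: the record type
+ * (OPA vs. IB), destination and source GIDs, our P_Key, and the traffic
+ * class learned from the broadcast group.
+ */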
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
+static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+
+ if (!priv->broadcast)
+ return NULL;
+
+ path = kzalloc(sizeof(*path), GFP_ATOMIC);
+ if (!path)
+ return NULL;
+
+ skb_queue_head_init(&path->queue);
+
+ INIT_LIST_HEAD(&path->neigh_list);
+
+ init_path_rec(priv, path, gid);
+
+ return path;
+}
+
+static int path_rec_start(struct net_device *dev,
+ struct ipoib_path *path)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "Start path record lookup for %pI6\n",
+ path->pathrec.dgid.raw);
+
+ init_completion(&path->done);
+
+ path->query_id =
+ ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
+ &path->pathrec,
+ IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_NUMB_PATH |
+ IB_SA_PATH_REC_TRAFFIC_CLASS |
+ IB_SA_PATH_REC_PKEY,
+ 1000, GFP_ATOMIC,
+ path_rec_completion,
+ path, &path->query);
+ if (path->query_id < 0) {
+ ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
+ path->query = NULL;
+ complete(&path->done);
+ return path->query_id;
+ }
+
+ return 0;
+}
+
+static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ path = __path_find(dev, daddr + 4);
+ if (!path)
+ goto out;
+ if (!path->query)
+ path_rec_start(dev, path);
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
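+/*
+ * Resolve (or create) the path for a new neighbour: transmit the skb
+ * immediately if a valid AH already exists, otherwise queue it (bounded
+ * by IPOIB_MAX_PATH_REC_QUEUE) until the path record query completes.
+ */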
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_path *path;
+ struct ipoib_neigh *neigh;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ neigh = ipoib_neigh_alloc(daddr, dev);
+ if (!neigh) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+	/* To avoid a race condition, make sure that the
+	 * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
+ }
+
+ path = __path_find(dev, daddr + 4);
+ if (!path) {
+ path = path_rec_create(dev, daddr + 4);
+ if (!path)
+ goto err_path;
+
+ __path_add(dev, path);
+ }
+
+ list_add_tail(&neigh->list, &path->neigh_list);
+
+ if (path->ah && path->ah->valid) {
+ kref_get(&path->ah->ref);
+ neigh->ah = path->ah;
+
+ if (ipoib_cm_enabled(dev, neigh->daddr)) {
+ if (!ipoib_cm_get(neigh))
+ ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
+ if (!ipoib_cm_get(neigh)) {
+ ipoib_neigh_free(neigh);
+ goto err_drop;
+ }
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, neigh->daddr);
+ __skb_queue_tail(&neigh->queue, skb);
+ } else {
+ ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+ skb_queue_len(&neigh->queue));
+ goto err_drop;
+ }
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(daddr));
+ ipoib_neigh_put(neigh);
+ return NULL;
+ }
+ } else {
+ neigh->ah = NULL;
+
+ if (!path->query && path_rec_start(dev, path))
+ goto err_path;
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, neigh->daddr);
+ __skb_queue_tail(&neigh->queue, skb);
+ } else {
+ goto err_drop;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_neigh_put(neigh);
+ return NULL;
+
+err_path:
+ ipoib_neigh_free(neigh);
+err_drop:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_neigh_put(neigh);
+
+ return NULL;
+}
+
+static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+ struct ipoib_pseudo_header *phdr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+	/* no broadcast means that all paths are (or will become) invalid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+
+ path = __path_find(dev, phdr->hwaddr + 4);
+ if (!path || !path->ah || !path->ah->valid) {
+ if (!path) {
+ path = path_rec_create(dev, phdr->hwaddr + 4);
+ if (!path)
+ goto drop_and_unlock;
+ __path_add(dev, path);
+ } else {
+ /*
+ * make sure there are no changes in the existing
+ * path record
+ */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+ }
+ if (!path->query && path_rec_start(dev, path)) {
+ goto drop_and_unlock;
+ }
+
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ goto unlock;
+ } else {
+ goto drop_and_unlock;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_dbg(priv, "Send unicast ARP to %08x\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
+ return;
+
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
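+/*
+ * Main transmit entry point.  The pseudo header pushed by
+ * ipoib_hard_header() is stripped here; multicast (hwaddr[4] == 0xff) is
+ * handed to the mcast machinery, unicast ARP/RARP always trigger a fresh
+ * path lookup, and everything else goes through the neighbour cache.
+ */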
+static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_neigh *neigh;
+ struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+ unsigned long flags;
+
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
+ header = (struct ipoib_header *) skb->data;
+
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
+ /* multicast, arrange "if" according to probability */
+ if ((header->proto != htons(ETH_P_IP)) &&
+ (header->proto != htons(ETH_P_IPV6)) &&
+ (header->proto != htons(ETH_P_ARP)) &&
+ (header->proto != htons(ETH_P_RARP)) &&
+ (header->proto != htons(ETH_P_TIPC))) {
+ /* ethertype not supported by IPoIB */
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+		/* Add in the P_Key for multicast */
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
+
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (likely(neigh))
+ goto send_using_neigh;
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* unicast, arrange "switch" according to probability */
+ switch (header->proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ case htons(ETH_P_TIPC):
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (unlikely(!neigh)) {
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
+ }
+ break;
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_RARP):
+		/* unicast ARP and RARP should always perform a path lookup */
+ unicast_arp_send(skb, dev, phdr);
+ return NETDEV_TX_OK;
+ default:
+ /* ethertype not supported by IPoIB */
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+send_using_neigh:
+ /* note we now hold a ref to neigh */
+ if (ipoib_cm_get(neigh)) {
+ if (ipoib_cm_up(neigh)) {
+ ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
+ goto unref;
+ }
+ } else if (neigh->ah && neigh->ah->valid) {
+ neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
+ goto unref;
+ } else if (neigh->ah) {
+ neigh_refresh_path(neigh, phdr->hwaddr, dev);
+ }
+
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ spin_lock_irqsave(&priv->lock, flags);
+ __skb_queue_tail(&neigh->queue, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
+
+unref:
+ ipoib_neigh_put(neigh);
+
+ return NETDEV_TX_OK;
+}
+
+static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (rn->tx_timeout) {
+ rn->tx_timeout(dev, txqueue);
+ return;
+ }
+ ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
+ jiffies_to_msecs(jiffies - dev_trans_start(dev)));
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
+ /* XXX reset QP, etc. */
+}
+
+static int ipoib_hard_header(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ const void *daddr,
+ const void *saddr,
+ unsigned int len)
+{
+ struct ipoib_header *header;
+
+ header = skb_push(skb, sizeof(*header));
+
+ header->proto = htons(type);
+ header->reserved = 0;
+
+ /*
+	 * We don't rely on the dst_entry structure; always stuff the
+	 * destination address into the skb hard header so we can figure
+	 * out where to send the packet later.
+ */
+ push_pseudo_header(skb, daddr);
+
+ return IPOIB_HARD_LEN;
+}
+
+static void ipoib_set_mcast_list(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+ ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
+ return;
+ }
+
+ queue_work(priv->wq, &priv->restart_task);
+}
+
+static int ipoib_get_iflink(const struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ /* parent interface */
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return dev->ifindex;
+
+ /* child/vlan interface */
+ return priv->parent->ifindex;
+}
+
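+/*
+ * Layout of the 20-byte IPoIB hardware address as used by the hash and
+ * neighbour code below (a sketch, inferred from the uses of daddr in
+ * this file):
+ *
+ *   daddr[0:4)    flags + QPN (QPN extracted with IPOIB_QPN_MASK)
+ *   daddr[4:20)   destination GID:
+ *     daddr[4:12)   subnet prefix
+ *     daddr[12:20)  port GUID (interface ID)
+ */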
+static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
+{
+ /*
+	 * Use only the address parts that contribute to spreading.
+	 * The subnet prefix is not used, as one cannot connect to the
+	 * same remote port (GUID) using the same remote QPN via two
+	 * different subnets.
+ */
+ /* qpn octets[1:4) & port GUID octets[12:20) */
+ u32 *d32 = (u32 *) daddr;
+ u32 hv;
+
+ hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
+ return hv & htbl->mask;
+}
+
+struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh *neigh = NULL;
+ u32 hash_val;
+
+ rcu_read_lock_bh();
+
+ htbl = rcu_dereference_bh(ntbl->htbl);
+
+ if (!htbl)
+ goto out_unlock;
+
+ hash_val = ipoib_addr_hash(htbl, daddr);
+ for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
+ neigh != NULL;
+ neigh = rcu_dereference_bh(neigh->hnext)) {
+ if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
+ /* found, take one ref on behalf of the caller */
+ if (!refcount_inc_not_zero(&neigh->refcnt)) {
+ /* deleted */
+ neigh = NULL;
+ goto out_unlock;
+ }
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ rcu_read_unlock_bh();
+ return neigh;
+}
+
+static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long neigh_obsolete;
+ unsigned long dt;
+ unsigned long flags;
+ int i;
+ LIST_HEAD(remove_list);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+
+ if (!htbl)
+ goto out_unlock;
+
+ /* neigh is obsolete if it was idle for two GC periods */
+ dt = 2 * arp_tbl.gc_interval;
+ neigh_obsolete = jiffies - dt;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) != NULL) {
+ /* was the neigh idle for two GC periods */
+ if (time_after(neigh_obsolete, neigh->alive)) {
+
+ ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
+
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from path/mc list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ } else {
+ np = &neigh->hnext;
+ }
+
+ }
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_mcast_remove_list(&remove_list);
+}
+
+static void ipoib_reap_neigh(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
+
+ __ipoib_reap_neigh(priv);
+
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
+}
+
+
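+/*
+ * Allocate and initialize a neighbour entry.  The returned reference
+ * belongs to the caller; the hash table takes its own reference in
+ * ipoib_neigh_alloc().
+ */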
+static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_neigh *neigh;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
+ if (!neigh)
+ return NULL;
+
+ neigh->dev = dev;
+ memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
+ skb_queue_head_init(&neigh->queue);
+ INIT_LIST_HEAD(&neigh->list);
+ ipoib_cm_set(neigh, NULL);
+ /* one ref on behalf of the caller */
+ refcount_set(&neigh->refcnt, 1);
+
+ return neigh;
+}
+
+struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh *neigh;
+ u32 hash_val;
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+ if (!htbl) {
+ neigh = NULL;
+ goto out_unlock;
+ }
+
+	/* We need to add a new neigh, but maybe some other thread succeeded
+	 * first?  Recalc the hash; a hash resize may have taken place, so
+	 * do a search.
+ */
+ hash_val = ipoib_addr_hash(htbl, daddr);
+ for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
+ lockdep_is_held(&priv->lock));
+ neigh != NULL;
+ neigh = rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock))) {
+ if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
+ /* found, take one ref on behalf of the caller */
+ if (!refcount_inc_not_zero(&neigh->refcnt)) {
+ /* deleted */
+ neigh = NULL;
+ break;
+ }
+ neigh->alive = jiffies;
+ goto out_unlock;
+ }
+ }
+
+ neigh = ipoib_neigh_ctor(daddr, dev);
+ if (!neigh)
+ goto out_unlock;
+
+ /* one ref on behalf of the hash table */
+ refcount_inc(&neigh->refcnt);
+ neigh->alive = jiffies;
+ /* put in hash */
+ rcu_assign_pointer(neigh->hnext,
+ rcu_dereference_protected(htbl->buckets[hash_val],
+ lockdep_is_held(&priv->lock)));
+ rcu_assign_pointer(htbl->buckets[hash_val], neigh);
+ atomic_inc(&ntbl->entries);
+
+out_unlock:
+
+ return neigh;
+}
+
+void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
+{
+	/* neigh reference count was dropped to zero */
+ struct net_device *dev = neigh->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sk_buff *skb;
+ if (neigh->ah)
+ ipoib_put_ah(neigh->ah);
+ while ((skb = __skb_dequeue(&neigh->queue))) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
+ if (ipoib_cm_get(neigh))
+ ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
+ ipoib_dbg(ipoib_priv(dev),
+ "neigh free for %06x %pI6\n",
+ IPOIB_QPN(neigh->daddr),
+ neigh->daddr + 4);
+ kfree(neigh);
+ if (atomic_dec_and_test(&priv->ntbl.entries)) {
+ if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
+ complete(&priv->ntbl.flushed);
+ }
+}
+
+static void ipoib_neigh_reclaim(struct rcu_head *rp)
+{
+ /* Called as a result of removal from hash table */
+ struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
+ /* note TX context may hold another ref */
+ ipoib_neigh_put(neigh);
+}
+
+void ipoib_neigh_free(struct ipoib_neigh *neigh)
+{
+ struct net_device *dev = neigh->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh __rcu **np;
+ struct ipoib_neigh *n;
+ u32 hash_val;
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+ if (!htbl)
+ return;
+
+ hash_val = ipoib_addr_hash(htbl, neigh->daddr);
+ np = &htbl->buckets[hash_val];
+ for (n = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock));
+ n != NULL;
+ n = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) {
+ if (n == neigh) {
+ /* found */
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from parent list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ return;
+ } else {
+ np = &n->hnext;
+ }
+ }
+}
+
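+/*
+ * Size the neighbour hash after the core ARP table (gc_thresh3, rounded
+ * up to a power of two) so the mask-based bucket lookup stays cheap, then
+ * kick off the periodic garbage collector.
+ */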
+static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh __rcu **buckets;
+ u32 size;
+
+ clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
+ ntbl->htbl = NULL;
+ htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ if (!htbl)
+ return -ENOMEM;
+ size = roundup_pow_of_two(arp_tbl.gc_thresh3);
+ buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
+ if (!buckets) {
+ kfree(htbl);
+ return -ENOMEM;
+ }
+ htbl->size = size;
+ htbl->mask = (size - 1);
+ htbl->buckets = buckets;
+ RCU_INIT_POINTER(ntbl->htbl, htbl);
+ htbl->ntbl = ntbl;
+ atomic_set(&ntbl->entries, 0);
+
+ /* start garbage collection */
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
+
+ return 0;
+}
+
+static void neigh_hash_free_rcu(struct rcu_head *head)
+{
+ struct ipoib_neigh_hash *htbl = container_of(head,
+ struct ipoib_neigh_hash,
+ rcu);
+ struct ipoib_neigh __rcu **buckets = htbl->buckets;
+ struct ipoib_neigh_table *ntbl = htbl->ntbl;
+
+ kvfree(buckets);
+ kfree(htbl);
+ complete(&ntbl->deleted);
+}
+
+void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long flags;
+ int i;
+
+ /* remove all neigh connected to a given path or mcast */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+
+ if (!htbl)
+ goto out_unlock;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) != NULL) {
+			/* delete neighs belonging to this parent */
+ if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from parent list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ } else {
+ np = &neigh->hnext;
+ }
+
+ }
+ }
+out_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
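+/*
+ * Tear down the whole neighbour table: every entry is unhooked and freed
+ * via RCU, and if any entries existed we wait on ntbl.flushed, which the
+ * last ipoib_neigh_dtor() completes once the entry count drops to zero.
+ */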
+static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long flags;
+ int i, wait_flushed = 0;
+
+ init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+ if (!htbl)
+ goto out_unlock;
+
+ wait_flushed = atomic_read(&priv->ntbl.entries);
+ if (!wait_flushed)
+ goto free_htbl;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) != NULL) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from path/mc list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ }
+ }
+
+free_htbl:
+ rcu_assign_pointer(ntbl->htbl, NULL);
+ call_rcu(&htbl->rcu, neigh_hash_free_rcu);
+
+out_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (wait_flushed)
+ wait_for_completion(&priv->ntbl.flushed);
+}
+
+static void ipoib_neigh_hash_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "%s\n", __func__);
+ init_completion(&priv->ntbl.deleted);
+
+ cancel_delayed_work_sync(&priv->neigh_reap_task);
+
+ ipoib_flush_neighs(priv);
+
+ wait_for_completion(&priv->ntbl.deleted);
+}
+
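+/*
+ * Separate NAPI contexts for RX and TX: the receive poller handles up to
+ * IPOIB_NUM_WC completions per round, the send poller up to MAX_SEND_CQE.
+ */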
+static void ipoib_napi_add(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
+ IPOIB_NUM_WC);
+ netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
+ MAX_SEND_CQE);
+}
+
+static void ipoib_napi_del(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_del(&priv->recv_napi);
+ netif_napi_del(&priv->send_napi);
+}
+
+static void ipoib_dev_uninit_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_transport_dev_cleanup(dev);
+
+ ipoib_napi_del(dev);
+
+ ipoib_cm_dev_cleanup(dev);
+
+ kfree(priv->rx_ring);
+ vfree(priv->tx_ring);
+
+ priv->rx_ring = NULL;
+ priv->tx_ring = NULL;
+}
+
+static int ipoib_dev_init_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ u8 addr_mod[3];
+
+ ipoib_napi_add(dev);
+
+ /* Allocate RX/TX "rings" to hold queued skbs */
+ priv->rx_ring = kcalloc(ipoib_recvq_size,
+ sizeof(*priv->rx_ring),
+ GFP_KERNEL);
+ if (!priv->rx_ring)
+ goto out;
+
+ priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
+ sizeof(*priv->tx_ring)));
+ if (!priv->tx_ring) {
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
+ goto out_rx_ring_cleanup;
+ }
+
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
+
+ if (ipoib_transport_dev_init(dev, priv->ca)) {
+ pr_warn("%s: ipoib_transport_dev_init failed\n",
+ priv->ca->name);
+ goto out_tx_ring_cleanup;
+ }
+
+ /* after qp created set dev address */
+ addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
+ addr_mod[1] = (priv->qp->qp_num >> 8) & 0xff;
+ addr_mod[2] = (priv->qp->qp_num) & 0xff;
+ dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
+
+ return 0;
+
+out_tx_ring_cleanup:
+ vfree(priv->tx_ring);
+
+out_rx_ring_cleanup:
+ kfree(priv->rx_ring);
+
+out:
+ ipoib_napi_del(dev);
+ return -ENOMEM;
+}
+
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->rn_ops->ndo_eth_ioctl)
+ return -EOPNOTSUPP;
+
+ return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
+}
+
+static int ipoib_dev_init(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = -ENOMEM;
+
+ priv->qp = NULL;
+
+ /*
+	 * The various IPoIB tasks assume they will never race against
+	 * themselves, so always use a single-threaded workqueue.
+ */
+ priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
+ if (!priv->wq) {
+ pr_warn("%s: failed to allocate device WQ\n", dev->name);
+ goto out;
+ }
+
+	/* create the PD, which is used for both control and datapath */
+ priv->pd = ib_alloc_pd(priv->ca, 0);
+ if (IS_ERR(priv->pd)) {
+ pr_warn("%s: failed to allocate PD\n", priv->ca->name);
+ goto clean_wq;
+ }
+
+ ret = priv->rn_ops->ndo_init(dev);
+ if (ret) {
+ pr_warn("%s failed to init HW resource\n", dev->name);
+ goto out_free_pd;
+ }
+
+ ret = ipoib_neigh_hash_init(priv);
+ if (ret) {
+ pr_warn("%s failed to init neigh hash\n", dev->name);
+ goto out_dev_uninit;
+ }
+
+ if (dev->flags & IFF_UP) {
+ if (ipoib_ib_dev_open(dev)) {
+ pr_warn("%s failed to open device\n", dev->name);
+ ret = -ENODEV;
+ goto out_hash_uninit;
+ }
+ }
+
+ return 0;
+
+out_hash_uninit:
+ ipoib_neigh_hash_uninit(dev);
+
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
+out_free_pd:
+ if (priv->pd) {
+ ib_dealloc_pd(priv->pd);
+ priv->pd = NULL;
+ }
+
+clean_wq:
+ if (priv->wq) {
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * This must be called before doing an unregister_netdev on a parent device to
+ * shutdown the IB event handler.
+ */
+static void ipoib_parent_unregister_pre(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ /*
+	 * ipoib_set_mac checks netif_running before pushing work; clearing
+	 * running ensures that it will not add more work.
+ */
+ rtnl_lock();
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
+ rtnl_unlock();
+
+ /* ipoib_event() cannot be running once this returns */
+ ib_unregister_event_handler(&priv->event_handler);
+
+ /*
+ * Work on the queue grabs the rtnl lock, so this cannot be done while
+ * also holding it.
+ */
+ flush_workqueue(ipoib_workqueue);
+}
+
+static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
+{
+ priv->hca_caps = priv->ca->attrs.device_cap_flags;
+ priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
+
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (priv->kernel_caps & IBK_UD_TSO)
+ priv->dev->hw_features |= NETIF_F_TSO;
+
+ priv->dev->features |= priv->dev->hw_features;
+ }
+}
+
+static int ipoib_parent_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ib_port_attr attr;
+ int result;
+
+ result = ib_query_port(priv->ca, priv->port, &attr);
+ if (result) {
+ pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
+ priv->port);
+ return result;
+ }
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
+
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+
+ result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
+ if (result) {
+ pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+ dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
+
+ SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_port = priv->port - 1;
+ /* Let's set this one too for backwards compatibility. */
+ priv->dev->dev_id = priv->port - 1;
+
+ return 0;
+}
+
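+/*
+ * Illustrative layout of the 20-byte (INFINIBAND_ALEN) IPoIB hardware
+ * address that ipoib_parent_init() fills in above (offsets are inferred
+ * from the dev_addr_mod() call, not quoted from a spec):
+ *
+ *   bytes 0..3    reserved flags/QPN
+ *   bytes 4..19   the port GID, written at offset 4
+ */
+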
+static void ipoib_child_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
+ memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
+ sizeof(priv->local_gid));
+ else {
+ __dev_addr_set(priv->dev, ppriv->dev->dev_addr,
+ INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid,
+ sizeof(priv->local_gid));
+ }
+}
+
+static int ipoib_ndo_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
+
+ if (priv->parent) {
+ ipoib_child_init(ndev);
+ } else {
+ rc = ipoib_parent_init(ndev);
+ if (rc)
+ return rc;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
+ ndev->max_mtu = IPOIB_CM_MTU;
+
+ ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ priv->pkey |= 0x8000;
+
+ ndev->broadcast[8] = priv->pkey >> 8;
+ ndev->broadcast[9] = priv->pkey & 0xff;
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
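+
+ /*
+ * Worked example for the pkey handling above (illustrative): a
+ * partial-membership pkey of 0x0001 becomes the full-membership
+ * pkey 0x8001 and lands in broadcast[8..9] as 0x80, 0x01, which
+ * selects the matching broadcast MGID.
+ */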
+
+ ipoib_set_dev_features(priv);
+
+ rc = ipoib_dev_init(ndev);
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
+ return rc;
+ }
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ dev_hold(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_add_tail(&priv->list, &ppriv->child_intfs);
+ up_write(&ppriv->vlan_rwsem);
+ }
+
+ return 0;
+}
+
+static void ipoib_ndo_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ASSERT_RTNL();
+
+ /*
+ * ipoib_remove_one guarantees the children are removed before the
+ * parent, and that is the only place where a parent can be removed.
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ }
+
+ ipoib_neigh_hash_uninit(dev);
+
+ ipoib_ib_dev_cleanup(dev);
+
+ /* no more work items can be queued on priv->wq */
+ if (priv->wq) {
+ /* See ipoib_mcast_carrier_on_task() */
+ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
+
+ if (priv->parent)
+ dev_put(priv->parent);
+}
+
+static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
+}
+
+static int ipoib_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int err;
+
+ err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
+ if (err)
+ return err;
+
+ ivf->vf = vf;
+ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
+
+ return 0;
+}
+
+static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
+ return -EINVAL;
+
+ return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+}
+
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
+static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
+}
+
+static const struct header_ops ipoib_header_ops = {
+ .create = ipoib_hard_header,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_pf = {
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_set_vf_link_state = ipoib_set_vf_link_state,
+ .ndo_get_vf_config = ipoib_get_vf_config,
+ .ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
+ .ndo_set_vf_guid = ipoib_set_vf_guid,
+ .ndo_set_mac_address = ipoib_set_mac,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_eth_ioctl = ipoib_ioctl,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_vf = {
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_eth_ioctl = ipoib_ioctl,
+};
+
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
+
+void ipoib_setup_common(struct net_device *dev)
+{
+ dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
+
+ ipoib_set_ethtool_ops(dev);
+
+ dev->watchdog_timeo = HZ;
+
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+
+ dev->hard_header_len = IPOIB_HARD_LEN;
+ dev->addr_len = INFINIBAND_ALEN;
+ dev->type = ARPHRD_INFINIBAND;
+ dev->tx_queue_len = ipoib_sendq_size * 2;
+ dev->features = (NETIF_F_VLAN_CHALLENGED |
+ NETIF_F_HIGHDMA);
+ netif_keep_dst(dev);
+
+ memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
+
+ /*
+ * unregister_netdev always frees the netdev; we use this mode
+ * consistently to unify all the various unregister paths, including
+ * those connected to rtnl_link_ops which require it.
+ */
+ dev->needs_free_netdev = true;
+}
+
+static void ipoib_build_priv(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ priv->dev = dev;
+ spin_lock_init(&priv->lock);
+ init_rwsem(&priv->vlan_rwsem);
+ mutex_init(&priv->mcast_mutex);
+
+ INIT_LIST_HEAD(&priv->path_list);
+ INIT_LIST_HEAD(&priv->child_intfs);
+ INIT_LIST_HEAD(&priv->dead_ahs);
+ INIT_LIST_HEAD(&priv->multicast_list);
+
+ INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
+ INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
+ INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
+ INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
+ INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
+ INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
+}
+
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
+ const char *name)
+{
+ struct net_device *dev;
+
+ dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common);
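+ /*
+ * Either the device-specific netdev allocation succeeded, or it
+ * failed for a reason other than "not supported": return as-is.
+ * Only on -EOPNOTSUPP do we fall back to a generic alloc_netdev().
+ */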
+ if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
+ return dev;
+
+ dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
+ ipoib_setup_common);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ return dev;
+}
+
+int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
+ struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_dev_priv *priv;
+ int rc;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ca = hca;
+ priv->port = port;
+
+ rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common, dev);
+ if (rc) {
+ if (rc != -EOPNOTSUPP)
+ goto out;
+
+ rn->send = ipoib_send;
+ rn->attach_mcast = ipoib_mcast_attach;
+ rn->detach_mcast = ipoib_mcast_detach;
+ rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
+ }
+
+ priv->rn_ops = dev->netdev_ops;
+
+ if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
+ dev->netdev_ops = &ipoib_netdev_ops_vf;
+ else
+ dev->netdev_ops = &ipoib_netdev_ops_pf;
+
+ rn->clnt_priv = priv;
+ /*
+ * Only the child register_netdev flows can handle priv_destructor
+ * being set, so we force it to NULL here and handle manually until it
+ * is safe to turn on.
+ */
+ priv->next_priv_destructor = dev->priv_destructor;
+ dev->priv_destructor = NULL;
+
+ ipoib_build_priv(dev);
+
+ return 0;
+
+out:
+ kfree(priv);
+ return rc;
+}
+
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
+ const char *name)
+{
+ struct net_device *dev;
+ int rc;
+
+ dev = ipoib_alloc_netdev(hca, port, name);
+ if (IS_ERR(dev))
+ return dev;
+
+ rc = ipoib_intf_init(hca, port, name, dev);
+ if (rc) {
+ free_netdev(dev);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * Upon success the caller must either ensure that ipoib_intf_free() is
+ * called, or that register_netdevice() succeeded and priv_destructor was
+ * set to ipoib_intf_free.
+ */
+ return dev;
+}
+
+void ipoib_intf_free(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ dev->priv_destructor = priv->next_priv_destructor;
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+
+ /*
+ * There are some error flows around register_netdev failing that may
+ * attempt to call priv_destructor twice; prevent that from happening.
+ */
+ dev->priv_destructor = NULL;
+
+ /* unregister/destroy is very complicated. Make bugs more obvious. */
+ rn->clnt_priv = NULL;
+
+ kfree(priv);
+}
+
+static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ return sysfs_emit(buf, "0x%04x\n", priv->pkey);
+}
+static DEVICE_ATTR_RO(pkey);
+
+static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ return sysfs_emit(buf, "%d\n",
+ test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
+}
+
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ if (umcast_val > 0) {
+ set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+ ipoib_warn(priv, "ignoring multicast groups joined directly "
+ "by userspace\n");
+ } else
+ clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+}
+
+static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
+
+ ipoib_set_umcast(to_net_dev(dev), umcast_val);
+
+ return count;
+}
+static DEVICE_ATTR_RW(umcast);
+
+int ipoib_add_umcast_attr(struct net_device *dev)
+{
+ return device_create_file(&dev->dev, &dev_attr_umcast);
+}
+
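+/*
+ * Example usage from userspace (illustrative; "ib0" is a placeholder
+ * interface name):
+ *
+ *   echo 1 > /sys/class/net/ib0/umcast   # let userspace manage mcast joins
+ *   cat /sys/class/net/ib0/umcast        # read the current setting back
+ */
+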
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *netdev = priv->dev;
+
+ netif_addr_lock_bh(netdev);
+
+ memcpy(&priv->local_gid.global.interface_id,
+ &gid->global.interface_id,
+ sizeof(gid->global.interface_id));
+ dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
+ clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ netif_addr_unlock_bh(netdev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(child_priv, &priv->child_intfs, list)
+ set_base_guid(child_priv, gid);
+ up_read(&priv->vlan_rwsem);
+ }
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+ struct sockaddr_storage *ss)
+{
+ union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+ int ret = 0;
+
+ netif_addr_lock_bh(dev);
+
+ /* Make sure the QPN, reserved and subnet prefix match the current
+ * lladdr, it also makes sure the lladdr is unicast.
+ */
+ if (memcmp(dev->dev_addr, ss->__data,
+ 4 + sizeof(gid->global.subnet_prefix)) ||
+ gid->global.interface_id == 0)
+ ret = -EINVAL;
+
+ netif_addr_unlock_bh(dev);
+
+ return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sockaddr_storage *ss = addr;
+ int ret;
+
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+ return -EBUSY;
+
+ ret = ipoib_check_lladdr(dev, ss);
+ if (ret)
+ return ret;
+
+ set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+ queue_work(ipoib_workqueue, &priv->flush_light);
+
+ return 0;
+}
+
+static ssize_t create_child_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int pkey;
+ int ret;
+
+ if (sscanf(buf, "%i", &pkey) != 1)
+ return -EINVAL;
+
+ if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
+ return -EINVAL;
+
+ ret = ipoib_vlan_add(to_net_dev(dev), pkey);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_WO(create_child);
+
+static ssize_t delete_child_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int pkey;
+ int ret;
+
+ if (sscanf(buf, "%i", &pkey) != 1)
+ return -EINVAL;
+
+ if (pkey < 0 || pkey > 0xffff)
+ return -EINVAL;
+
+ ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
+
+ return ret ? ret : count;
+
+}
+static DEVICE_ATTR_WO(delete_child);
+
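+/*
+ * Example usage from userspace (illustrative; "ib0" and the pkey value
+ * are placeholders):
+ *
+ *   echo 0x8001 > /sys/class/net/ib0/create_child   # creates ib0.8001
+ *   echo 0x8001 > /sys/class/net/ib0/delete_child   # removes it again
+ */
+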
+int ipoib_add_pkey_attr(struct net_device *dev)
+{
+ return device_create_file(&dev->dev, &dev_attr_pkey);
+}
+
+/*
+ * We erroneously exposed the iface's port number in the dev_id
+ * sysfs field long after dev_port was introduced for that purpose[1],
+ * and we need to stop everyone from relying on that.
+ * Let's overload the show routine for the dev_id file here
+ * to gently bring the issue up.
+ *
+ * [1] https://www.spinics.net/lists/netdev/msg272123.html
+ */
+static ssize_t dev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+
+ /*
+ * ndev->dev_port will be 0 in kernels prior to commit
+ * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
+ * port numbers"). Zero was chosen as a special case so that user space
+ * applications can fall back and query dev_id to check whether it has
+ * a different value or not.
+ *
+ * Don't print warning in such scenario.
+ *
+ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
+ */
+ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+
+ return sysfs_emit(buf, "%#x\n", ndev->dev_id);
+}
+static DEVICE_ATTR_RO(dev_id);
+
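+/*
+ * Swap the networking core's default dev_id attribute for the logging
+ * variant above; the file is removed first so device_create_file() does
+ * not fail with -EEXIST.
+ */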
+static int ipoib_intercept_dev_id_attr(struct net_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_dev_id);
+ return device_create_file(&dev->dev, &dev_attr_dev_id);
+}
+
+static struct net_device *ipoib_add_port(const char *format,
+ struct ib_device *hca, u32 port)
+{
+ struct rtnl_link_ops *ops = ipoib_get_link_ops();
+ struct rdma_netdev_alloc_params params;
+ struct ipoib_dev_priv *priv;
+ struct net_device *ndev;
+ int result;
+
+ ndev = ipoib_intf_alloc(hca, port, format);
+ if (IS_ERR(ndev)) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
+ PTR_ERR(ndev));
+ return ndev;
+ }
+ priv = ipoib_priv(ndev);
+
+ INIT_IB_EVENT_HANDLER(&priv->event_handler,
+ priv->ca, ipoib_event);
+ ib_register_event_handler(&priv->event_handler);
+
+ /* call event handler to ensure pkey in sync */
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
+
+ result = register_netdev(ndev);
+ if (result) {
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
+
+ ipoib_parent_unregister_pre(ndev);
+ ipoib_intf_free(ndev);
+ free_netdev(ndev);
+
+ return ERR_PTR(result);
+ }
+
+ if (hca->ops.rdma_netdev_get_params) {
+ int rc = hca->ops.rdma_netdev_get_params(hca, port,
+ RDMA_NETDEV_IPOIB,
+ &params);
+
+ if (!rc && ops->priv_size < params.sizeof_priv)
+ ops->priv_size = params.sizeof_priv;
+ }
+ /*
+ * We cannot set priv_destructor before register_netdev because we
+ * need priv to be always valid during the error flow to execute
+ * ipoib_parent_unregister_pre(). Instead handle it manually and only
+ * enter priv_destructor mode once we are completely registered.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ if (ipoib_intercept_dev_id_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_cm_add_mode_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_pkey_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_umcast_attr(ndev))
+ goto sysfs_failed;
+ if (device_create_file(&ndev->dev, &dev_attr_create_child))
+ goto sysfs_failed;
+ if (device_create_file(&ndev->dev, &dev_attr_delete_child))
+ goto sysfs_failed;
+
+ return ndev;
+
+sysfs_failed:
+ ipoib_parent_unregister_pre(ndev);
+ unregister_netdev(ndev);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int ipoib_add_one(struct ib_device *device)
+{
+ struct list_head *dev_list;
+ struct net_device *dev;
+ struct ipoib_dev_priv *priv;
+ unsigned int p;
+ int count = 0;
+
+ dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
+ if (!dev_list)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(dev_list);
+
+ rdma_for_each_port (device, p) {
+ if (!rdma_protocol_ib(device, p))
+ continue;
+ dev = ipoib_add_port("ib%d", device, p);
+ if (!IS_ERR(dev)) {
+ priv = ipoib_priv(dev);
+ list_add_tail(&priv->list, dev_list);
+ count++;
+ }
+ }
+
+ if (!count) {
+ kfree(dev_list);
+ return -EOPNOTSUPP;
+ }
+
+ ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
+}
+
+static void ipoib_remove_one(struct ib_device *device, void *client_data)
+{
+ struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
+ struct list_head *dev_list = client_data;
+
+ list_for_each_entry_safe(priv, tmp, dev_list, list) {
+ LIST_HEAD(head);
+ ipoib_parent_unregister_pre(priv->dev);
+
+ rtnl_lock();
+
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
+ list)
+ unregister_netdevice_queue(cpriv->dev, &head);
+ unregister_netdevice_queue(priv->dev, &head);
+ unregister_netdevice_many(&head);
+
+ rtnl_unlock();
+ }
+
+ kfree(dev_list);
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+ .notifier_call = ipoib_netdev_event,
+};
+#endif
+
+static int __init ipoib_init_module(void)
+{
+ int ret;
+
+ ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
+ ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
+ ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
+
+ ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
+ ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
+ ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+ ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
+#endif
+
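+ /*
+ * Illustrative: a receive queue size module parameter of 200 is
+ * rounded up to 256 and then clamped to the [IPOIB_MIN_QUEUE_SIZE,
+ * IPOIB_MAX_QUEUE_SIZE] range by the statements above.
+ */
+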
+ /*
+ * When copying small received packets, we only copy from the
+ * linear data part of the SKB, so we rely on this condition.
+ */
+ BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
+
+ ipoib_register_debugfs();
+
+ /*
+ * We create a global workqueue here that is used for all flush
+ * operations. However, if you attempt to flush a workqueue
+ * from a task on that same workqueue, it deadlocks the system.
+ * We want to be able to flush the tasks associated with a
+ * specific net device, so we also create a workqueue for each
+ * netdevice. We queue up the tasks for that device only on
+ * its private workqueue, and we only queue up flush events
+ * on our global flush workqueue. This avoids the deadlocks.
+ */
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
+ if (!ipoib_workqueue) {
+ ret = -ENOMEM;
+ goto err_fs;
+ }
+
+ ib_sa_register_client(&ipoib_sa_client);
+
+ ret = ib_register_client(&ipoib_client);
+ if (ret)
+ goto err_sa;
+
+ ret = ipoib_netlink_init();
+ if (ret)
+ goto err_client;
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
+ return 0;
+
+err_client:
+ ib_unregister_client(&ipoib_client);
+
+err_sa:
+ ib_sa_unregister_client(&ipoib_sa_client);
+ destroy_workqueue(ipoib_workqueue);
+
+err_fs:
+ ipoib_unregister_debugfs();
+
+ return ret;
+}
+
+static void __exit ipoib_cleanup_module(void)
+{
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
+ ipoib_netlink_fini();
+ ib_unregister_client(&ipoib_client);
+ ib_sa_unregister_client(&ipoib_sa_client);
+ ipoib_unregister_debugfs();
+ destroy_workqueue(ipoib_workqueue);
+}
+
+module_init(ipoib_init_module);
+module_exit(ipoib_cleanup_module);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
new file mode 100644
index 000000000..5b3154503
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -0,0 +1,1051 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/inetdevice.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+
+#include <net/dst.h>
+
+#include "ipoib.h"
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int mcast_debug_level;
+
+module_param(mcast_debug_level, int, 0644);
+MODULE_PARM_DESC(mcast_debug_level,
+ "Enable multicast debug tracing if > 0");
+#endif
+
+struct ipoib_mcast_iter {
+ struct net_device *dev;
+ union ib_gid mgid;
+ unsigned long created;
+ unsigned int queuelen;
+ unsigned int complete;
+ unsigned int send_only;
+};
+
+/* join state that allows creating mcg with sendonly member request */
+#define SENDONLY_FULLMEMBER_JOIN 8
+
+/*
+ * This should be called with the priv->lock held
+ */
+static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
+ struct ipoib_mcast *mcast,
+ bool delay)
+{
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+
+ /*
+ * We will be scheduling *something*, so cancel whatever is
+ * currently scheduled first
+ */
+ cancel_delayed_work(&priv->mcast_task);
+ if (mcast && delay) {
+ /*
+ * We had a failure and want to schedule a retry later
+ */
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ mcast->delay_until = jiffies + (mcast->backoff * HZ);
+ /*
+ * Mark this mcast for its delay, but restart the
+ * task immediately. The join task will make sure to
+ * clear out all entries without delays, and then
+ * schedule itself to run again when the earliest
+ * delay expires
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ } else if (delay) {
+ /*
+ * Special case of retrying after a failure to
+ * allocate the broadcast multicast group, wait
+ * 1 second and try again
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
+ } else
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+}
+
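+/*
+ * Illustrative retry schedule from the backoff handling above: after each
+ * failure the delay doubles (2s, 4s, 8s, ...) up to
+ * IPOIB_MAX_BACKOFF_SECONDS, and mcast->delay_until records the earliest
+ * jiffy at which the join task will consider the group again.
+ */
+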
+static void ipoib_mcast_free(struct ipoib_mcast *mcast)
+{
+ struct net_device *dev = mcast->dev;
+ int tx_dropped = 0;
+
+ ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ /* remove all neigh connected to this mcast */
+ ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);
+
+ if (mcast->ah)
+ ipoib_put_ah(mcast->ah);
+
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ ++tx_dropped;
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ }
+
+ netif_tx_lock_bh(dev);
+ dev->stats.tx_dropped += tx_dropped;
+ netif_tx_unlock_bh(dev);
+
+ kfree(mcast);
+}
+
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
+{
+ struct ipoib_mcast *mcast;
+
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+ if (!mcast)
+ return NULL;
+
+ mcast->dev = dev;
+ mcast->created = jiffies;
+ mcast->delay_until = jiffies;
+ mcast->backoff = 1;
+
+ INIT_LIST_HEAD(&mcast->list);
+ INIT_LIST_HEAD(&mcast->neigh_list);
+ skb_queue_head_init(&mcast->pkt_queue);
+
+ return mcast;
+}
+
+static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node *n = priv->multicast_tree.rb_node;
+
+ while (n) {
+ struct ipoib_mcast *mcast;
+ int ret;
+
+ mcast = rb_entry(n, struct ipoib_mcast, rb_node);
+
+ ret = memcmp(mgid, mcast->mcmember.mgid.raw,
+ sizeof (union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ return mcast;
+ }
+
+ return NULL;
+}
+
+static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
+
+ while (*n) {
+ struct ipoib_mcast *tmcast;
+ int ret;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
+
+ ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
+ sizeof (union ib_gid));
+ if (ret < 0)
+ n = &pn->rb_left;
+ else if (ret > 0)
+ n = &pn->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
+
+ return 0;
+}
+
+static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+ struct ib_sa_mcmember_rec *mcmember)
+{
+ struct net_device *dev = mcast->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_ah *ah;
+ struct rdma_ah_attr av;
+ int ret;
+ int set_qkey = 0;
+ int mtu;
+
+ mcast->mcmember = *mcmember;
+
+ /* Set the multicast MTU and cached Q_Key before we attach if it's
+ * the broadcast group.
+ */
+ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid))) {
+ spin_lock_irq(&priv->lock);
+ if (!priv->broadcast) {
+ spin_unlock_irq(&priv->lock);
+ return -EAGAIN;
+ }
+ /* update priv members according to the new mcast */
+ priv->broadcast->mcmember.qkey = mcmember->qkey;
+ priv->broadcast->mcmember.mtu = mcmember->mtu;
+ priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
+ priv->broadcast->mcmember.rate = mcmember->rate;
+ priv->broadcast->mcmember.sl = mcmember->sl;
+ priv->broadcast->mcmember.flow_label = mcmember->flow_label;
+ priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
+ /* assume that if the admin MTU and the mcast MTU are equal, both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
+ if (priv->mcast_mtu == priv->admin_mtu)
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
+
+ priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
+ spin_unlock_irq(&priv->lock);
+ priv->tx_wr.remote_qkey = priv->qkey;
+ set_qkey = 1;
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ ipoib_warn(priv, "multicast group %pI6 already attached\n",
+ mcast->mcmember.mgid.raw);
+
+ return 0;
+ }
+
+ ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
+ be16_to_cpu(mcast->mcmember.mlid),
+ set_qkey, priv->qkey);
+ if (ret < 0) {
+ ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
+ return ret;
+ }
+ }
+
+ memset(&av, 0, sizeof(av));
+ av.type = rdma_ah_find_type(priv->ca, priv->port);
+ rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
+ rdma_ah_set_port_num(&av, priv->port);
+ rdma_ah_set_sl(&av, mcast->mcmember.sl);
+ rdma_ah_set_static_rate(&av, mcast->mcmember.rate);
+
+ rdma_ah_set_grh(&av, &mcast->mcmember.mgid,
+ be32_to_cpu(mcast->mcmember.flow_label),
+ 0, mcast->mcmember.hop_limit,
+ mcast->mcmember.traffic_class);
+
+ ah = ipoib_create_ah(dev, priv->pd, &av);
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
+ }
+ spin_lock_irq(&priv->lock);
+ mcast->ah = ah;
+ spin_unlock_irq(&priv->lock);
+
+ ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
+ mcast->mcmember.mgid.raw,
+ mcast->ah->ah,
+ be16_to_cpu(mcast->mcmember.mlid),
+ mcast->mcmember.sl);
+
+ /* actually send any queued packets */
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+
+ netif_tx_unlock_bh(dev);
+
+ skb->dev = dev;
+
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
+ netif_tx_lock_bh(dev);
+ }
+ netif_tx_unlock_bh(dev);
+
+ return 0;
+}
+
+void ipoib_mcast_carrier_on_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ carrier_on_task);
+ struct ib_port_attr attr;
+
+ if (ib_query_port(priv->ca, priv->port, &attr) ||
+ attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
+ return;
+ }
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_OPER_UP go away as that signals that we are bailing
+ * and can safely ignore the carrier on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+ netif_carrier_on(priv->dev);
+ rtnl_unlock();
+}
+
+static int ipoib_mcast_join_complete(int status,
+ struct ib_sa_multicast *multicast)
+{
+ struct ipoib_mcast *mcast = multicast->context;
+ struct net_device *dev = mcast->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
+ "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+
+ /* We trap for port events ourselves. */
+ if (status == -ENETRESET) {
+ status = 0;
+ goto out;
+ }
+
+ if (!status)
+ status = ipoib_mcast_join_finish(mcast, &multicast->rec);
+
+ if (!status) {
+ mcast->backoff = 1;
+ mcast->delay_until = jiffies;
+
+ /*
+ * Defer carrier on work to priv->wq to avoid a
+ * deadlock on rtnl_lock here. Requeue our multicast
+ * work too, which will end up happening right after
+ * our carrier on task work and will allow us to
+ * send out all of the non-broadcast joins
+ */
+ if (mcast == priv->broadcast) {
+ spin_lock_irq(&priv->lock);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ goto out_locked;
+ }
+ } else {
+ bool silent_fail =
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ status == -EINVAL;
+
+ if (mcast->logcount < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN ||
+ silent_fail) {
+ ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ }
+
+ if (!silent_fail)
+ mcast->logcount++;
+ }
+
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ mcast->backoff >= 2) {
+ /*
+ * We only retry sendonly joins once before we drop
+ * the packet and quit trying to deal with the
+ * group. However, we leave the group in the
+ * mcast list as an unjoined group. If we want to
+ * try joining again, we simply queue up a packet
+ * and restart the join thread. The empty queue
+ * is why the join thread ignores this group.
+ */
+ mcast->backoff = 1;
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ }
+ netif_tx_unlock_bh(dev);
+ } else {
+ spin_lock_irq(&priv->lock);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ goto out_locked;
+ }
+ }
+out:
+ spin_lock_irq(&priv->lock);
+out_locked:
+ /*
+ * Make sure to set mcast->mc before we clear the busy flag to avoid
+ * racing with code that checks for BUSY before checking mcast->mc
+ */
+ if (status)
+ mcast->mc = NULL;
+ else
+ mcast->mc = multicast;
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ spin_unlock_irq(&priv->lock);
+ complete(&mcast->done);
+
+ return status;
+}
+
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_sa_multicast *multicast;
+ struct ib_sa_mcmember_rec rec = {
+ .join_state = 1
+ };
+ ib_sa_comp_mask comp_mask;
+ int ret = 0;
+
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+ ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
+
+ rec.mgid = mcast->mcmember.mgid;
+ rec.port_gid = priv->local_gid;
+ rec.pkey = cpu_to_be16(priv->pkey);
+
+ comp_mask =
+ IB_SA_MCMEMBER_REC_MGID |
+ IB_SA_MCMEMBER_REC_PORT_GID |
+ IB_SA_MCMEMBER_REC_PKEY |
+ IB_SA_MCMEMBER_REC_JOIN_STATE;
+
+ if (mcast != priv->broadcast) {
+ /*
+ * RFC 4391:
+ * The MGID MUST use the same P_Key, Q_Key, SL, MTU,
+ * and HopLimit as those used in the broadcast-GID. The rest
+ * of attributes SHOULD follow the values used in the
+ * broadcast-GID as well.
+ */
+ comp_mask |=
+ IB_SA_MCMEMBER_REC_QKEY |
+ IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+ IB_SA_MCMEMBER_REC_MTU |
+ IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
+ IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+ IB_SA_MCMEMBER_REC_RATE |
+ IB_SA_MCMEMBER_REC_SL |
+ IB_SA_MCMEMBER_REC_FLOW_LABEL |
+ IB_SA_MCMEMBER_REC_HOP_LIMIT;
+
+ rec.qkey = priv->broadcast->mcmember.qkey;
+ rec.mtu_selector = IB_SA_EQ;
+ rec.mtu = priv->broadcast->mcmember.mtu;
+ rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ rec.rate_selector = IB_SA_EQ;
+ rec.rate = priv->broadcast->mcmember.rate;
+ rec.sl = priv->broadcast->mcmember.sl;
+ rec.flow_label = priv->broadcast->mcmember.flow_label;
+ rec.hop_limit = priv->broadcast->mcmember.hop_limit;
+
+ /*
+ * Send-only IB multicast joins work at the core IB layer but
+ * require specific SM support; we can use them here only if
+ * the current SM supports the feature. If it does not, we
+ * emulate an Ethernet multicast send, which does not require
+ * a multicast subscription and will still send properly. The
+ * most appropriate thing to do is to create the group if it
+ * doesn't exist, as that most closely emulates, from a user
+ * space application's perspective, the behavior of Ethernet
+ * multicast.
+ */
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
+ }
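+ /*
+ * Drop priv->lock around ib_sa_join_multicast(): the call can sleep
+ * (GFP_KERNEL allocation) and the join callback takes the lock
+ * itself.
+ */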
+ spin_unlock_irq(&priv->lock);
+
+ multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
+ &rec, comp_mask, GFP_KERNEL,
+ ipoib_mcast_join_complete, mcast);
+ spin_lock_irq(&priv->lock);
+ if (IS_ERR(multicast)) {
+ ret = PTR_ERR(multicast);
+ ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ spin_unlock_irq(&priv->lock);
+ complete(&mcast->done);
+ spin_lock_irq(&priv->lock);
+ }
+ return 0;
+}
+
+void ipoib_mcast_join_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, mcast_task.work);
+ struct net_device *dev = priv->dev;
+ struct ib_port_attr port_attr;
+ unsigned long delay_until = 0;
+ struct ipoib_mcast *mcast = NULL;
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+
+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+ ipoib_dbg(priv, "ib_query_port() failed\n");
+ return;
+ }
+ if (port_attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
+ port_attr.state);
+ return;
+ }
+ priv->local_lid = port_attr.lid;
+ netif_addr_lock_bh(dev);
+
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ netif_addr_unlock_bh(dev);
+ return;
+ }
+ netif_addr_unlock_bh(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ goto out;
+
+ if (!priv->broadcast) {
+ struct ipoib_mcast *broadcast;
+
+ broadcast = ipoib_mcast_alloc(dev);
+ if (!broadcast) {
+ ipoib_warn(priv, "failed to allocate broadcast group\n");
+ /*
+ * Restart us after a 1 second delay to retry
+ * creating our broadcast group and attaching to
+ * it. Until this succeeds, this ipoib dev is
+ * completely stalled (multicast wise).
+ */
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 1);
+ goto out;
+ }
+
+ memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid));
+ priv->broadcast = broadcast;
+
+ __ipoib_mcast_add(dev, priv->broadcast);
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
+ mcast = priv->broadcast;
+ if (mcast->backoff > 1 &&
+ time_before(jiffies, mcast->delay_until)) {
+ delay_until = mcast->delay_until;
+ mcast = NULL;
+ }
+ }
+ goto out;
+ }
+
+ /*
+ * We'll never get here until the broadcast group is both allocated
+ * and attached
+ */
+ list_for_each_entry(mcast, &priv->multicast_list, list) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
+ !skb_queue_empty(&mcast->pkt_queue))) {
+ if (mcast->backoff == 1 ||
+ time_after_eq(jiffies, mcast->delay_until)) {
+ /* Found the next unjoined group */
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
+ } else if (!delay_until ||
+ time_before(mcast->delay_until, delay_until))
+ delay_until = mcast->delay_until;
+ }
+ }
+
+ mcast = NULL;
+ ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");
+
+out:
+ if (delay_until) {
+ cancel_delayed_work(&priv->mcast_task);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ delay_until - jiffies);
+ }
+ if (mcast)
+ ipoib_mcast_join(dev, mcast);
+
+ spin_unlock_irq(&priv->lock);
+}
+
+void ipoib_mcast_start_thread(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ unsigned long flags;
+
+ ipoib_dbg_mcast(priv, "starting multicast thread\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void ipoib_mcast_stop_thread(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg_mcast(priv, "stopping multicast thread\n");
+
+ cancel_delayed_work_sync(&priv->mcast_task);
+}
+
+static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ int ret = 0;
+
+ if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
+ ib_sa_free_multicast(mcast->mc);
+
+ if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ /* Remove ourselves from the multicast group */
+ ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
+ be16_to_cpu(mcast->mcmember.mlid));
+ if (ret)
+ ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
+ } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_dbg(priv, "leaving with no mcmember but not a "
+ "SENDONLY join\n");
+
+ return 0;
+}
+
+/*
+ * Check if the multicast group is send-only. If so, remove it from the maps
+ * and add it to the remove list.
+ */
+void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
+ struct list_head *remove_list)
+{
+ /* Is this a multicast address? */
+ if (*mgid == 0xff) {
+ struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);
+
+ if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, remove_list);
+ }
+ }
+}
+
+void ipoib_mcast_remove_list(struct list_head *remove_list)
+{
+ struct ipoib_mcast *mcast, *tmcast;
+
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
+ ipoib_mcast_leave(mcast->dev, mcast);
+ ipoib_mcast_free(mcast);
+ }
+}
+
+void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_mcast *mcast;
+ unsigned long flags;
+ void *mgid = daddr + 4;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
+ !priv->broadcast ||
+ !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
+ mcast = __ipoib_mcast_find(dev, mgid);
+ if (!mcast || !mcast->ah) {
+ if (!mcast) {
+ /* Let's create a new send only group now */
+ ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
+ mgid);
+
+ mcast = ipoib_mcast_alloc(dev);
+ if (!mcast) {
+ ipoib_warn(priv, "unable to allocate memory "
+ "for multicast structure\n");
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
+ set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
+ memcpy(mcast->mcmember.mgid.raw, mgid,
+ sizeof (union ib_gid));
+ __ipoib_mcast_add(dev, mcast);
+ list_add_tail(&mcast->list, &priv->multicast_list);
+ }
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
+ skb_queue_tail(&mcast->pkt_queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
+ if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ }
+ } else {
+ struct ipoib_neigh *neigh;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ neigh = ipoib_neigh_get(dev, daddr);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!neigh) {
+ neigh = ipoib_neigh_alloc(daddr, dev);
+ /* Make sure that the neigh is added to the
+ * mcast list only once.
+ */
+ if (neigh && list_empty(&neigh->list)) {
+ kref_get(&mcast->ah->ref);
+ neigh->ah = mcast->ah;
+ neigh->ah->valid = 1;
+ list_add_tail(&neigh->list, &mcast->neigh_list);
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
+ IB_MULTICAST_QPN);
+ if (neigh)
+ ipoib_neigh_put(neigh);
+ return;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void ipoib_mcast_dev_flush(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ LIST_HEAD(remove_list);
+ struct ipoib_mcast *mcast, *tmcast;
+ unsigned long flags;
+
+ mutex_lock(&priv->mcast_mutex);
+ ipoib_dbg_mcast(priv, "flushing multicast list\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, &remove_list);
+ }
+
+ if (priv->broadcast) {
+ rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&priv->broadcast->list, &remove_list);
+ priv->broadcast = NULL;
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ipoib_mcast_remove_list(&remove_list);
+ mutex_unlock(&priv->mcast_mutex);
+}
+
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
+{
+ /* reserved QPN, prefix, scope */
+ if (memcmp(addr, broadcast, 6))
+ return 0;
+ /* signature lower, pkey */
+ if (memcmp(addr + 7, broadcast + 7, 3))
+ return 0;
+ return 1;
+}
+
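+/*
+ * Illustrative byte map for the checks above, assuming the usual IPoIB
+ * broadcast address layout (an assumption, not quoted from a spec):
+ *
+ *   bytes 0..3   reserved QPN
+ *   bytes 4..5   GID prefix byte and flags/scope
+ *   byte  6      skipped, since it differs between the IPv4 and IPv6
+ *                multicast signatures and both families must validate
+ *   bytes 7..9   low signature byte plus the 16-bit pkey
+ */
+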
+void ipoib_mcast_restart_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, restart_task);
+ struct net_device *dev = priv->dev;
+ struct netdev_hw_addr *ha;
+ struct ipoib_mcast *mcast, *tmcast;
+ LIST_HEAD(remove_list);
+ struct ib_sa_mcmember_rec rec;
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ /*
+ * shortcut: on shutdown, flush is called next; just
+ * let it do all the work
+ */
+ return;
+
+ ipoib_dbg_mcast(priv, "restarting multicast task\n");
+
+ netif_addr_lock_bh(dev);
+ spin_lock_irq(&priv->lock);
+
+ /*
+ * Unfortunately, the networking core only gives us a list of all of
+ * the multicast hardware addresses. We need to figure out which ones
+ * are new and which ones have been removed
+ */
+
+ /* Clear out the found flag */
+ list_for_each_entry(mcast, &priv->multicast_list, list)
+ clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
+
+ /* Mark all of the entries that are found; create those that don't exist */
+ netdev_for_each_mc_addr(ha, dev) {
+ union ib_gid mgid;
+
+ if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
+ continue;
+
+ memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));
+
+ mcast = __ipoib_mcast_find(dev, &mgid);
+ if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ struct ipoib_mcast *nmcast;
+
+ /* ignore a group that is joined directly by userspace */
+ if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
+ !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
+ ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
+ mgid.raw);
+ continue;
+ }
+
+ /* Not found or send-only group, let's add a new entry */
+ ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
+ mgid.raw);
+
+ nmcast = ipoib_mcast_alloc(dev);
+ if (!nmcast) {
+ ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
+ continue;
+ }
+
+ set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);
+
+ nmcast->mcmember.mgid = mgid;
+
+ if (mcast) {
+ /* Destroy the send only entry */
+ list_move_tail(&mcast->list, &remove_list);
+
+ rb_replace_node(&mcast->rb_node,
+ &nmcast->rb_node,
+ &priv->multicast_tree);
+ } else
+ __ipoib_mcast_add(dev, nmcast);
+
+ list_add_tail(&nmcast->list, &priv->multicast_list);
+ }
+
+ if (mcast)
+ set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
+ }
+
+ /* Remove all of the entries that no longer exist */
+ list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+ if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
+ !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+
+ /* Move to the remove list */
+ list_move_tail(&mcast->list, &remove_list);
+ }
+ }
+
+ spin_unlock_irq(&priv->lock);
+ netif_addr_unlock_bh(dev);
+
+ ipoib_mcast_remove_list(&remove_list);
+
+ /*
+ * Double check that we are still up
+ */
+ if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+ spin_lock_irq(&priv->lock);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irq(&priv->lock);
+ }
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
+struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
+{
+ struct ipoib_mcast_iter *iter;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
+ iter->dev = dev;
+ memset(iter->mgid.raw, 0, 16);
+
+ if (ipoib_mcast_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
+ struct rb_node *n;
+ struct ipoib_mcast *mcast;
+ int ret = 1;
+
+ spin_lock_irq(&priv->lock);
+
+ n = rb_first(&priv->multicast_tree);
+
+ while (n) {
+ mcast = rb_entry(n, struct ipoib_mcast, rb_node);
+
+ if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
+ sizeof (union ib_gid)) < 0) {
+ iter->mgid = mcast->mcmember.mgid;
+ iter->created = mcast->created;
+ iter->queuelen = skb_queue_len(&mcast->pkt_queue);
+ iter->complete = !!mcast->ah;
+ iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
+
+ ret = 0;
+
+ break;
+ }
+
+ n = rb_next(n);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
+ union ib_gid *mgid,
+ unsigned long *created,
+ unsigned int *queuelen,
+ unsigned int *complete,
+ unsigned int *send_only)
+{
+ *mgid = iter->mgid;
+ *created = iter->created;
+ *queuelen = iter->queuelen;
+ *complete = iter->complete;
+ *send_only = iter->send_only;
+}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
new file mode 100644
index 000000000..9ad8d9856
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
+#include <net/rtnetlink.h>
+#include "ipoib.h"
+
+static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+ [IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
+ [IFLA_IPOIB_MODE] = { .type = NLA_U16 },
+ [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
+};
+
+static unsigned int ipoib_get_max_num_queues(void)
+{
+ return min_t(unsigned int, num_possible_cpus(), 128);
+}
+
+static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ u16 val;
+
+ if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
+ goto nla_put_failure;
+
+ val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
+ goto nla_put_failure;
+
+ val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+ if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u16 mode, umcast;
+ int ret = 0;
+
+ if (data[IFLA_IPOIB_MODE]) {
+ mode = nla_get_u16(data[IFLA_IPOIB_MODE]);
+ if (mode == IPOIB_MODE_DATAGRAM)
+ ret = ipoib_set_mode(dev, "datagram\n");
+ else if (mode == IPOIB_MODE_CONNECTED)
+ ret = ipoib_set_mode(dev, "connected\n");
+ else
+ ret = -EINVAL;
+
+ if (ret < 0)
+ goto out_err;
+ }
+
+ if (data[IFLA_IPOIB_UMCAST]) {
+ umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
+ ipoib_set_umcast(dev, umcast);
+ }
+
+out_err:
+ return ret;
+}
+
+static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *pdev;
+ struct ipoib_dev_priv *ppriv;
+ u16 child_pkey;
+ int err;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!pdev || pdev->type != ARPHRD_INFINIBAND)
+ return -ENODEV;
+
+ ppriv = ipoib_priv(pdev);
+
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
+ ipoib_warn(ppriv, "child creation disallowed for child devices\n");
+ return -EINVAL;
+ }
+
+ if (!data || !data[IFLA_IPOIB_PKEY]) {
+ ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
+ child_pkey = ppriv->pkey;
+ } else
+ child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+
+ err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
+ if (err) {
+ ipoib_warn(ppriv, "failed to initialize pkey device\n");
+ return err;
+ }
+
+ err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
+ child_pkey, IPOIB_RTNL_CHILD);
+ if (err)
+ return err;
+
+ if (data) {
+ err = ipoib_changelink(dev, tb, data, extack);
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
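+/*
+ * Illustrative iproute2 invocation that exercises this newlink path
+ * (interface name and pkey are placeholders):
+ *
+ *   ip link add link ib0 name ib0.8001 type ipoib pkey 0x8001 mode connected
+ */
+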
+static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->parent)
+ return;
+
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t ipoib_get_size(const struct net_device *dev)
+{
+ return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
+ nla_total_size(2) + /* IFLA_IPOIB_MODE */
+ nla_total_size(2); /* IFLA_IPOIB_UMCAST */
+}
+
+static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ .kind = "ipoib",
+ .netns_refund = true,
+ .maxtype = IFLA_IPOIB_MAX,
+ .policy = ipoib_policy,
+ .priv_size = sizeof(struct ipoib_dev_priv),
+ .setup = ipoib_setup_common,
+ .newlink = ipoib_new_child_link,
+ .dellink = ipoib_del_child_link,
+ .changelink = ipoib_changelink,
+ .get_size = ipoib_get_size,
+ .fill_info = ipoib_fill_info,
+ .get_num_rx_queues = ipoib_get_max_num_queues,
+ .get_num_tx_queues = ipoib_get_max_num_queues,
+};
+
+struct rtnl_link_ops *ipoib_get_link_ops(void)
+{
+ return &ipoib_link_ops;
+}
+
+int __init ipoib_netlink_init(void)
+{
+ return rtnl_link_register(&ipoib_link_ops);
+}
+
+void __exit ipoib_netlink_fini(void)
+{
+ rtnl_link_unregister(&ipoib_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("ipoib");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
new file mode 100644
index 000000000..368e5d774
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "ipoib.h"
+
+int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr *qp_attr = NULL;
+ int ret;
+ u16 pkey_index;
+
+ if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ ret = -ENXIO;
+ goto out;
+ }
+ set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+ if (set_qkey) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto out;
+
+ /* set correct QKey for QP */
+ qp_attr->qkey = qkey;
+ ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* attach QP to multicast group */
+ ret = ib_attach_mcast(priv->qp, mgid, mlid);
+ if (ret)
+ ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
+
+out:
+ kfree(qp_attr);
+ return ret;
+}
+
+int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ ret = ib_detach_mcast(priv->qp, mgid, mlid);
+
+ return ret;
+}
+
+int ipoib_init_qp(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
+ return -1;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ qp_attr.qkey = 0;
+ qp_attr.port_num = priv->port;
+ qp_attr.pkey_index = priv->pkey_index;
+ attr_mask =
+ IB_QP_QKEY |
+ IB_QP_PORT |
+ IB_QP_PKEY_INDEX |
+ IB_QP_STATE;
+ ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
+ goto out_fail;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ /* Can't set this in a INIT->RTR transition */
+ attr_mask &= ~IB_QP_PORT;
+ ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
+ goto out_fail;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ qp_attr.sq_psn = 0;
+ attr_mask |= IB_QP_SQ_PSN;
+ attr_mask &= ~IB_QP_PKEY_INDEX;
+ ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ qp_attr.qp_state = IB_QPS_RESET;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ ipoib_warn(priv, "Failed to modify QP to RESET state\n");
+
+ return ret;
+}
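+
+/*
+ * Editor's note: the attr_mask juggling above follows the verbs state
+ * machine for UD QPs: RESET->INIT must set STATE|PKEY_INDEX|PORT|QKEY,
+ * INIT->RTR may not carry IB_QP_PORT, and RTR->RTS adds IB_QP_SQ_PSN
+ * while dropping IB_QP_PKEY_INDEX.
+ */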
+
+int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_init_attr init_attr = {
+ .cap = {
+ .max_send_wr = ipoib_sendq_size,
+ .max_recv_wr = ipoib_recvq_size,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1),
+ .max_recv_sge = IPOIB_UD_RX_SG
+ },
+ .sq_sig_type = IB_SIGNAL_ALL_WR,
+ .qp_type = IB_QPT_UD
+ };
+ struct ib_cq_init_attr cq_attr = {};
+
+ int ret, size, req_vec;
+ int i;
+ static atomic_t counter;
+
+ size = ipoib_recvq_size + 1;
+ ret = ipoib_cm_dev_init(dev);
+ if (!ret) {
+ size += ipoib_sendq_size;
+ if (ipoib_cm_has_srq(dev))
+ size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
+ else
+ size += ipoib_recvq_size * ipoib_max_conn_qp;
+	} else if (ret != -EOPNOTSUPP)
+		return ret;
+
+ req_vec = atomic_inc_return(&counter) * 2;
+ cq_attr.cqe = size;
+ cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
+ priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
+ priv, &cq_attr);
+ if (IS_ERR(priv->recv_cq)) {
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
+ goto out_cm_dev_cleanup;
+ }
+
+ cq_attr.cqe = ipoib_sendq_size;
+ cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
+ priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
+ priv, &cq_attr);
+ if (IS_ERR(priv->send_cq)) {
+ pr_warn("%s: failed to create send CQ\n", ca->name);
+ goto out_free_recv_cq;
+ }
+
+ if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
+ goto out_free_send_cq;
+
+ init_attr.send_cq = priv->send_cq;
+ init_attr.recv_cq = priv->recv_cq;
+
+ if (priv->kernel_caps & IBK_UD_TSO)
+ init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
+
+ if (priv->kernel_caps & IBK_BLOCK_MULTICAST_LOOPBACK)
+ init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+
+ if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
+ init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+
+ if (priv->kernel_caps & IBK_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
+ priv->qp = ib_create_qp(priv->pd, &init_attr);
+ if (IS_ERR(priv->qp)) {
+ pr_warn("%s: failed to create QP\n", ca->name);
+ goto out_free_send_cq;
+ }
+
+	if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+		goto out_destroy_qp;
+
+ for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
+ priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
+
+ priv->tx_wr.wr.opcode = IB_WR_SEND;
+ priv->tx_wr.wr.sg_list = priv->tx_sge;
+ priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED;
+
+ priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;
+
+ priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr.num_sge = 1;
+
+ priv->rx_wr.next = NULL;
+ priv->rx_wr.sg_list = priv->rx_sge;
+
+ if (init_attr.cap.max_send_sge > 1)
+ dev->features |= NETIF_F_SG;
+
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
+ return 0;
+
+out_destroy_qp:
+	ib_destroy_qp(priv->qp);
+	priv->qp = NULL;
+
+out_free_send_cq:
+ ib_destroy_cq(priv->send_cq);
+
+out_free_recv_cq:
+ ib_destroy_cq(priv->recv_cq);
+
+out_cm_dev_cleanup:
+ ipoib_cm_dev_cleanup(dev);
+
+ return -ENODEV;
+}
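+
+/*
+ * Editor's note, worked example (assuming the driver's default ring sizes
+ * of 256 RX and 64 TX entries): with connected mode and an SRQ, the size
+ * computed above for the receive CQ is (256 + 1) + 64 + (256 + 1) = 578
+ * entries; in datagram-only mode it stays at 257.
+ */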
+
+void ipoib_transport_dev_cleanup(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->qp) {
+ if (ib_destroy_qp(priv->qp))
+ ipoib_warn(priv, "ib_qp_destroy failed\n");
+
+ priv->qp = NULL;
+ }
+
+ ib_destroy_cq(priv->send_cq);
+ ib_destroy_cq(priv->recv_cq);
+}
+
+void ipoib_event(struct ib_event_handler *handler,
+ struct ib_event *record)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(handler, struct ipoib_dev_priv, event_handler);
+
+ if (record->element.port_num != priv->port)
+ return;
+
+ ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
+ dev_name(&record->device->dev), record->element.port_num);
+
+ if (record->event == IB_EVENT_CLIENT_REREGISTER) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ } else if (record->event == IB_EVENT_PORT_ERR ||
+ record->event == IB_EVENT_PORT_ACTIVE ||
+ record->event == IB_EVENT_LID_CHANGE) {
+ queue_work(ipoib_workqueue, &priv->flush_normal);
+ } else if (record->event == IB_EVENT_PKEY_CHANGE) {
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+ } else if (record->event == IB_EVENT_GID_CHANGE &&
+ !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ }
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
new file mode 100644
index 000000000..4bd161e86
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched/signal.h>
+
+#include <linux/init.h>
+#include <linux/seq_file.h>
+
+#include <linux/uaccess.h>
+
+#include "ipoib.h"
+
+static ssize_t parent_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return sysfs_emit(buf, "%s\n", priv->parent->name);
+}
+static DEVICE_ATTR_RO(parent);
+
+static bool is_child_unique(struct ipoib_dev_priv *ppriv,
+ struct ipoib_dev_priv *priv)
+{
+ struct ipoib_dev_priv *tpriv;
+
+ ASSERT_RTNL();
+
+ /*
+	 * Since the legacy sysfs interface uses the pkey for deletion, it
+	 * cannot support more than one interface with the same pkey; that
+	 * would be ambiguous. The RTNL interface deletes by netdev, so it
+	 * has no problem supporting duplicate pkeys.
+ */
+ if (priv->child_type != IPOIB_LEGACY_CHILD)
+ return true;
+
+ /*
+ * First ensure this isn't a duplicate. We check the parent device and
+ * then all of the legacy child interfaces to make sure the Pkey
+ * doesn't match.
+ */
+ if (ppriv->pkey == priv->pkey)
+ return false;
+
+ list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+ if (tpriv->pkey == priv->pkey &&
+ tpriv->child_type == IPOIB_LEGACY_CHILD)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * NOTE: If this function fails then priv->dev will remain valid, however
+ * priv will have been freed and must not be touched by the caller in the
+ * error case.
+ *
+ * If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
+ * free the net_device (just as rtnl_newlink does) otherwise the net_device
+ * will be freed when the rtnl is unlocked.
+ */
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+ u16 pkey, int type)
+{
+ struct net_device *ndev = priv->dev;
+ int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
+
+ ASSERT_RTNL();
+
+ /*
+ * We do not need to touch priv if register_netdevice fails, so just
+ * always use this flow.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ /*
+ * Racing with unregister of the parent must be prevented by the
+ * caller.
+ */
+ WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED);
+
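+	/*
+	 * pkey 0 is reserved/invalid and 0x8000 carries only the membership
+	 * bit with no key bits set, so neither names a usable partition.
+	 */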
+ if (pkey == 0 || pkey == 0x8000) {
+ result = -EINVAL;
+ goto out_early;
+ }
+
+ rn->mtu = priv->mcast_mtu;
+
+ priv->parent = ppriv->dev;
+ priv->pkey = pkey;
+ priv->child_type = type;
+
+ if (!is_child_unique(ppriv, priv)) {
+ result = -ENOTUNIQ;
+ goto out_early;
+ }
+
+ result = register_netdevice(ndev);
+ if (result) {
+ ipoib_warn(priv, "failed to initialize; error %i", result);
+
+ /*
+ * register_netdevice sometimes calls priv_destructor,
+ * sometimes not. Make sure it was done.
+ */
+ goto out_early;
+ }
+
+	/* RTNL children don't need the proprietary sysfs entries */
+ if (type == IPOIB_LEGACY_CHILD) {
+ if (ipoib_cm_add_mode_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_pkey_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_umcast_attr(ndev))
+ goto sysfs_failed;
+
+ if (device_create_file(&ndev->dev, &dev_attr_parent))
+ goto sysfs_failed;
+ }
+
+ return 0;
+
+sysfs_failed:
+ unregister_netdevice(priv->dev);
+ return -ENOMEM;
+
+out_early:
+ if (ndev->priv_destructor)
+ ndev->priv_destructor(ndev);
+ return result;
+}
+
+int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+{
+ struct ipoib_dev_priv *ppriv, *priv;
+ char intf_name[IFNAMSIZ];
+ struct net_device *ndev;
+ int result;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (pdev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ppriv = ipoib_priv(pdev);
+
+ snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+ ppriv->dev->name, pkey);
+
+ ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (IS_ERR(ndev)) {
+ result = PTR_ERR(ndev);
+ goto out;
+ }
+ priv = ipoib_priv(ndev);
+
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
+
+ result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+
+ if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(ndev);
+
+out:
+ rtnl_unlock();
+
+ return result;
+}
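+
+/*
+ * Illustrative usage (editor's sketch): the legacy path above is driven
+ * from sysfs, e.g.
+ *
+ *   echo 0x8001 > /sys/class/net/ib0/create_child
+ */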
+
+struct ipoib_vlan_delete_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+/*
+ * sysfs callbacks of a netdevice cannot obtain the rtnl lock as
+ * unregister_netdev ultimately deletes the sysfs files while holding the rtnl
+ * lock. This deadlocks the system.
+ *
+ * A callback can use rtnl_trylock to avoid the deadlock but it cannot call
+ * unregister_netdev as that internally takes and releases the rtnl_lock. So
+ * instead we find the netdev to unregister and then do the actual unregister
+ * from the global work queue where we can obtain the rtnl_lock safely.
+ */
+static void ipoib_vlan_delete_task(struct work_struct *work)
+{
+ struct ipoib_vlan_delete_work *pwork =
+ container_of(work, struct ipoib_vlan_delete_work, work);
+ struct net_device *dev = pwork->dev;
+
+ rtnl_lock();
+
+ /* Unregistering tasks can race with another task or parent removal */
+ if (dev->reg_state == NETREG_REGISTERED) {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
+ }
+
+ rtnl_unlock();
+
+ kfree(pwork);
+}
+
+int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
+{
+ struct ipoib_dev_priv *ppriv, *priv, *tpriv;
+ int rc;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (pdev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ppriv = ipoib_priv(pdev);
+
+ rc = -ENODEV;
+ list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
+ if (priv->pkey == pkey &&
+ priv->child_type == IPOIB_LEGACY_CHILD) {
+ struct ipoib_vlan_delete_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del_init(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ work->dev = priv->dev;
+ INIT_WORK(&work->work, ipoib_vlan_delete_task);
+ queue_work(ipoib_workqueue, &work->work);
+
+ rc = 0;
+ break;
+ }
+ }
+
+out:
+ rtnl_unlock();
+
+ return rc;
+}
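+
+/*
+ * Illustrative usage (editor's sketch): the matching legacy removal,
+ *
+ *   echo 0x8001 > /sys/class/net/ib0/delete_child
+ *
+ * ends up here and defers the actual unregister to the work above.
+ */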
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
new file mode 100644
index 000000000..3016a0c9a
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_ISER
+ tristate "iSCSI Extensions for RDMA (iSER)"
+ depends on SCSI && INET && INFINIBAND_ADDR_TRANS
+ select SCSI_ISCSI_ATTRS
+ help
+ Support for the iSCSI Extensions for RDMA (iSER) Protocol
+ over InfiniBand. This allows you to access storage devices
+ that speak iSCSI over iSER over InfiniBand.
+
+	  The iSER protocol is defined by the IETF.
+ See <http://www.ietf.org/rfc/rfc5046.txt>
+ and <http://members.infinibandta.org/kwspub/spec/Annex_iSER.PDF>
diff --git a/drivers/infiniband/ulp/iser/Makefile b/drivers/infiniband/ulp/iser/Makefile
new file mode 100644
index 000000000..2f3e78863
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o
+
+ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \
+ iscsi_iser.o
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
new file mode 100644
index 000000000..620ae5b2d
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -0,0 +1,1087 @@
+/*
+ * iSCSI Initiator over iSER Data-Path
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 Mike Christie
+ * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
+ * maintained by openib-general@openib.org
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Credits:
+ * Christoph Hellwig
+ * FUJITA Tomonori
+ * Arne Redlich
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/hardirq.h>
+#include <linux/kfifo.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <net/sock.h>
+
+#include <linux/uaccess.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "iscsi_iser.h"
+
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+
+static struct scsi_host_template iscsi_iser_sht;
+static struct iscsi_transport iscsi_iser_transport;
+static struct scsi_transport_template *iscsi_iser_scsi_transport;
+static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
+struct iser_global ig;
+
+int iser_debug_level = 0;
+module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
+
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops iscsi_iser_size_ops = {
+ .set = iscsi_iser_set,
+ .get = param_get_uint,
+};
+
+static unsigned int iscsi_max_lun = 512;
+module_param_cb(max_lun, &iscsi_iser_size_ops, &iscsi_max_lun, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session; must be > 0 (default:512)");
+
+unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
+module_param_cb(max_sectors, &iscsi_iser_size_ops, &iser_max_sectors,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command; must be > 0 (default:1024)");
+
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+ "Always register memory, even for continuous memory regions (default:true)");
+
+bool iser_pi_enable = false;
+module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
+MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
+
+static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ unsigned int n = 0;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n == 0)
+ return -EINVAL;
+
+ return param_set_uint(val, kp);
+}
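+
+/*
+ * Illustrative usage (editor's sketch): the parameters validated above are
+ * given at load time, e.g.
+ *
+ *   modprobe ib_iser max_sectors=2048 pi_enable=1
+ *
+ * a zero max_lun or max_sectors is rejected by iscsi_iser_set() with
+ * -EINVAL.
+ */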
+
+/**
+ * iscsi_iser_recv() - Process a successful recv completion
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+ * @rx_data: buffer containing receive data payload
+ * @rx_data_len: length of rx_data
+ *
+ * Notes: In case of data length errors or iscsi PDU completion failures
+ * this routine will signal the iscsi layer of a connection failure.
+ */
+void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *rx_data, int rx_data_len)
+{
+ int rc = 0;
+ int datalen;
+
+ /* verify PDU length */
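+	/* Editor's note: iSCSI pads the data segment to a 4-byte boundary,
+	 * so the received length may exceed dlength by padding; the check
+	 * below allows up to 4 bytes of slack and rejects anything beyond. */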
+ datalen = ntoh24(hdr->dlength);
+ if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
+ iser_err("wrong datalen %d (hdr), %d (IB)\n",
+ datalen, rx_data_len);
+ rc = ISCSI_ERR_DATALEN;
+ goto error;
+ }
+
+ if (datalen != rx_data_len)
+ iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
+ datalen, rx_data_len);
+
+ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+ return;
+error:
+ iscsi_conn_failure(conn, rc);
+}
+
+/**
+ * iscsi_iser_pdu_alloc() - allocate an iscsi-iser PDU
+ * @task: iscsi task
+ * @opcode: iscsi command opcode
+ *
+ * Notes: This routine can't fail; it just assigns the iscsi task
+ * hdr and max hdr size.
+ */
+static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+
+ task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
+ task->hdr_max = sizeof(iser_task->desc.iscsi_header);
+
+ return 0;
+}
+
+/**
+ * iser_initialize_task_headers() - Initialize task headers
+ * @task: iscsi task
+ * @tx_desc: iser tx descriptor
+ *
+ * Notes:
+ * This routine may race with iser teardown flow for scsi
+ * error handling TMFs. So for TMF we should acquire the
+ * state mutex to avoid dereferencing the IB device which
+ * may have already been terminated.
+ */
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
+{
+ struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn.device;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ u64 dma_addr;
+
+ if (unlikely(iser_conn->state != ISER_CONN_UP))
+ return -ENODEV;
+
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ return -ENOMEM;
+
+ tx_desc->inv_wr.next = NULL;
+ tx_desc->reg_wr.wr.next = NULL;
+ tx_desc->mapped = true;
+ tx_desc->dma_addr = dma_addr;
+ tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
+ tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
+
+ iser_task->iser_conn = iser_conn;
+
+ return 0;
+}
+
+/**
+ * iscsi_iser_task_init() - Initialize iscsi-iser task
+ * @task: iscsi task
+ *
+ * Initialize the task for the scsi command or mgmt command.
+ *
+ * Return: Returns zero on success or -ENOMEM when failing
+ * to init task headers (dma mapping error).
+ */
+static int iscsi_iser_task_init(struct iscsi_task *task)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ int ret;
+
+ ret = iser_initialize_task_headers(task, &iser_task->desc);
+ if (ret) {
+ iser_err("Failed to init task %p, err = %d\n",
+ iser_task, ret);
+ return ret;
+ }
+
+ /* mgmt task */
+ if (!task->sc)
+ return 0;
+
+ iser_task->command_sent = 0;
+ iser_task_rdma_init(iser_task);
+ iser_task->sc = task->sc;
+
+ return 0;
+}
+
+/**
+ * iscsi_iser_mtask_xmit() - xmit management (immediate) task
+ * @conn: iscsi connection
+ * @task: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN, in which case the caller must
+ * call it again later, or recover. A '0' return code means successful
+ * xmit.
+ *
+ **/
+static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
+ struct iscsi_task *task)
+{
+ int error = 0;
+
+ iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
+
+ error = iser_send_control(conn, task);
+
+	/* since iser xmits control with zero copy, tasks cannot be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+ * - if yes, the task is recycled at iscsi_complete_pdu
+ * - if no, the task is recycled at iser_snd_completion
+ */
+ return error;
+}
+
+static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_task *task)
+{
+ struct iscsi_r2t_info *r2t = &task->unsol_r2t;
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+ while (iscsi_task_has_unsol_data(task)) {
+ iscsi_prep_data_out_pdu(task, r2t, &hdr);
+ iser_dbg("Sending data-out: itt 0x%x, data count %d\n",
+ hdr.itt, r2t->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+ error = iser_send_data_out(conn, task, &hdr);
+ if (error) {
+ r2t->datasn--;
+ goto iscsi_iser_task_xmit_unsol_data_exit;
+ }
+ r2t->sent += r2t->data_count;
+ iser_dbg("Need to send %d more as data-out PDUs\n",
+ r2t->data_length - r2t->sent);
+ }
+
+iscsi_iser_task_xmit_unsol_data_exit:
+ return error;
+}
+
+/**
+ * iscsi_iser_task_xmit() - xmit iscsi-iser task
+ * @task: iscsi task
+ *
+ * Return: zero on success or a negative error code on failure.
+ */
+static int iscsi_iser_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ int error = 0;
+
+ if (!task->sc)
+ return iscsi_iser_mtask_xmit(conn, task);
+
+ if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+ BUG_ON(scsi_bufflen(task->sc) == 0);
+
+ iser_dbg("cmd [itt %x total %d imm %d unsol_data %d\n",
+ task->itt, scsi_bufflen(task->sc),
+ task->imm_count, task->unsol_r2t.data_length);
+ }
+
+ iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
+ conn->id, task->itt);
+
+ /* Send the cmd PDU */
+ if (!iser_task->command_sent) {
+ error = iser_send_command(conn, task);
+ if (error)
+ goto iscsi_iser_task_xmit_exit;
+ iser_task->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+ if (iscsi_task_has_unsol_data(task))
+ error = iscsi_iser_task_xmit_unsol_data(conn, task);
+
+ iscsi_iser_task_xmit_exit:
+ return error;
+}
+
+/**
+ * iscsi_iser_cleanup_task() - cleanup an iscsi-iser task
+ * @task: iscsi task
+ *
+ * Notes: In case the RDMA device is already NULL (it might have
+ * been removed in a DEVICE_REMOVAL CM event), this will bail out
+ * without doing dma unmapping.
+ */
+static void iscsi_iser_cleanup_task(struct iscsi_task *task)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn.device;
+
+ /* DEVICE_REMOVAL event might have already released the device */
+ if (!device)
+ return;
+
+ if (likely(tx_desc->mapped)) {
+ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->mapped = false;
+ }
+
+ /* mgmt tasks do not need special cleanup */
+ if (!task->sc)
+ return;
+
+ if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+ iser_task->status = ISER_TASK_STATUS_COMPLETED;
+ iser_task_rdma_finalize(iser_task);
+ }
+}
+
+/**
+ * iscsi_iser_check_protection() - check protection information status of task.
+ * @task: iscsi task
+ * @sector: error sector if one exists (output)
+ *
+ * Return: zero if no data-integrity errors have occurred
+ * 0x1: data-integrity error occurred in the guard-block
+ * 0x2: data-integrity error occurred in the reference tag
+ * 0x3: data-integrity error occurred in the application tag
+ *
+ * In addition the error sector is marked.
+ */
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
+ ISER_DIR_IN : ISER_DIR_OUT;
+
+ return iser_check_task_pi_status(iser_task, dir, sector);
+}
+
+/**
+ * iscsi_iser_conn_create() - create a new iscsi-iser connection
+ * @cls_session: iscsi class connection
+ * @conn_idx: connection index within the session (for MCS)
+ *
+ * Return: iscsi_cls_conn when iscsi_conn_setup succeeds or NULL
+ * otherwise.
+ */
+static struct iscsi_cls_conn *
+iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
+ uint32_t conn_idx)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+
+ /*
+	 * due to issues with the login code regarding iser semantics,
+	 * this is not set in iscsi_conn_setup - FIXME
+ */
+ conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
+
+ return cls_conn;
+}
+
+/**
+ * iscsi_iser_conn_bind() - bind iscsi and iser connection structures
+ * @cls_session: iscsi class session
+ * @cls_conn: iscsi class connection
+ * @transport_eph: transport end-point handle
+ * @is_leading: indicate if this is the session leading connection (MCS)
+ *
+ * Return: zero on success, the iscsi_conn_bind error code if it fails, and
+ * -EINVAL in case the end-point doesn't exist anymore or the iser
+ * connection state is not UP (teardown already started).
+ */
+static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_eph, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iser_conn *iser_conn;
+ struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (error)
+ return error;
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+ ep = iscsi_lookup_endpoint(transport_eph);
+ if (!ep) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+ iser_conn = ep->dd_data;
+
+ mutex_lock(&iser_conn->state_mutex);
+ if (iser_conn->state != ISER_CONN_UP) {
+ error = -EINVAL;
+ iser_err("iser_conn %p state is %d, teardown started\n",
+ iser_conn, iser_conn->state);
+ goto out;
+ }
+
+ error = iser_alloc_rx_descriptors(iser_conn, conn->session);
+ if (error)
+ goto out;
+
+ /* binds the iSER connection retrieved from the previously
+	 * connected ep_handle to the iSCSI layer connection and exchanges
+ * connection pointers */
+ iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn);
+
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+out:
+ iscsi_put_endpoint(ep);
+ mutex_unlock(&iser_conn->state_mutex);
+ return error;
+}
+
+/**
+ * iscsi_iser_conn_start() - start iscsi-iser connection
+ * @cls_conn: iscsi class connection
+ *
+ * Notes: Here iser initializes (or re-initializes) stop_completion since,
+ * from this point on, iscsi must call conn_stop during session/connection
+ * teardown, so the iser transport must wait for it.
+ */
+static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *iscsi_conn;
+ struct iser_conn *iser_conn;
+
+ iscsi_conn = cls_conn->dd_data;
+ iser_conn = iscsi_conn->dd_data;
+ reinit_completion(&iser_conn->stop_completion);
+
+ return iscsi_conn_start(cls_conn);
+}
+
+/**
+ * iscsi_iser_conn_stop() - stop iscsi-iser connection
+ * @cls_conn: iscsi class connection
+ * @flag: indicate if recover or terminate (passed as is)
+ *
+ * Notes: Calling iscsi_conn_stop might theoretically race with
+ * DEVICE_REMOVAL event and dereference a previously freed RDMA device
+ * handle, so we call it under the iser state lock to protect against
+ * this kind of race.
+ */
+static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iser_conn *iser_conn = conn->dd_data;
+
+ iser_info("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn);
+
+ /*
+ * Userspace may have goofed up and not bound the connection or
+	 * might have only partially set up the connection.
+ */
+ if (iser_conn) {
+ mutex_lock(&iser_conn->state_mutex);
+ mutex_lock(&unbind_iser_conn_mutex);
+ iser_conn_terminate(iser_conn);
+ iscsi_conn_stop(cls_conn, flag);
+
+ /* unbind */
+ iser_conn->iscsi_conn = NULL;
+ conn->dd_data = NULL;
+ mutex_unlock(&unbind_iser_conn_mutex);
+
+ complete(&iser_conn->stop_completion);
+ mutex_unlock(&iser_conn->state_mutex);
+ } else {
+ iscsi_conn_stop(cls_conn, flag);
+ }
+}
+
+/**
+ * iscsi_iser_session_destroy() - destroy iscsi-iser session
+ * @cls_session: iscsi class session
+ *
+ * Removes and frees the iscsi host.
+ */
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+ iscsi_session_teardown(cls_session);
+ iscsi_host_remove(shost, false);
+ iscsi_host_free(shost);
+}
+
+static inline unsigned int iser_dif_prot_caps(int prot_caps)
+{
+ int ret = 0;
+
+ if (prot_caps & IB_PROT_T10DIF_TYPE_1)
+ ret |= SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_2)
+ ret |= SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION;
+ if (prot_caps & IB_PROT_T10DIF_TYPE_3)
+ ret |= SHOST_DIF_TYPE3_PROTECTION |
+ SHOST_DIX_TYPE3_PROTECTION;
+
+ return ret;
+}
+
+/**
+ * iscsi_iser_session_create() - create an iscsi-iser session
+ * @ep: iscsi end-point handle
+ * @cmds_max: maximum commands in this session
+ * @qdepth: session command queue depth
+ * @initial_cmdsn: initiator command sequence number
+ *
+ * Allocates and adds a scsi host, exposes DIF support if
+ * it exists, and sets up an iscsi session.
+ */
+static struct iscsi_cls_session *
+iscsi_iser_session_create(struct iscsi_endpoint *ep,
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn)
+{
+ struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *shost;
+ struct iser_conn *iser_conn = NULL;
+ struct ib_conn *ib_conn;
+ struct ib_device *ib_dev;
+ u32 max_fr_sectors;
+
+ shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
+ if (!shost)
+ return NULL;
+ shost->transportt = iscsi_iser_scsi_transport;
+ shost->cmd_per_lun = qdepth;
+ shost->max_lun = iscsi_max_lun;
+ shost->max_id = 0;
+ shost->max_channel = 0;
+ shost->max_cmd_len = 16;
+
+ /*
+ * older userspace tools (before 2.0-870) did not pass us
+	 * the leading conn's ep, so this will be NULL.
+ */
+ if (ep) {
+ iser_conn = ep->dd_data;
+ shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
+ shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds);
+
+ mutex_lock(&iser_conn->state_mutex);
+ if (iser_conn->state != ISER_CONN_UP) {
+ iser_err("iser conn %p already started teardown\n",
+ iser_conn);
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+
+ ib_conn = &iser_conn->ib_conn;
+ ib_dev = ib_conn->device->ib_device;
+ if (ib_conn->pi_support) {
+ u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+
+ shost->sg_prot_tablesize = shost->sg_tablesize;
+ scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
+ }
+
+ if (!(ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
+ shost->virt_boundary_mask = SZ_4K - 1;
+
+ if (iscsi_host_add(shost, ib_dev->dev.parent)) {
+ mutex_unlock(&iser_conn->state_mutex);
+ goto free_host;
+ }
+ mutex_unlock(&iser_conn->state_mutex);
+ } else {
+ shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX);
+ if (iscsi_host_add(shost, NULL))
+ goto free_host;
+ }
+
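+	/*
+	 * Editor's note, worked through with the defaults: a sg_tablesize of
+	 * 128 entries and 4K pages give (128 * 4096) >> 9 = 1024 sectors,
+	 * i.e. the 512KB default I/O size.
+	 */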
+ max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
+ shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
+ iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+ iser_conn, shost->sg_tablesize,
+ shost->max_sectors);
+
+ if (shost->max_sectors < iser_max_sectors)
+ iser_warn("max_sectors was reduced from %u to %u\n",
+ iser_max_sectors, shost->max_sectors);
+
+ cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
+ shost->can_queue, 0,
+ sizeof(struct iscsi_iser_task),
+ initial_cmdsn, 0);
+ if (!cls_session)
+ goto remove_host;
+
+ return cls_session;
+
+remove_host:
+ iscsi_host_remove(shost, false);
+free_host:
+ iscsi_host_free(shost);
+ return NULL;
+}
+
+static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ int value;
+
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ /* TBD */
+ break;
+ case ISCSI_PARAM_HDRDGST_EN:
+ sscanf(buf, "%d", &value);
+ if (value) {
+ iser_err("DataDigest wasn't negotiated to None\n");
+ return -EPROTO;
+ }
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ sscanf(buf, "%d", &value);
+ if (value) {
+ iser_err("DataDigest wasn't negotiated to None\n");
+ return -EPROTO;
+ }
+ break;
+ case ISCSI_PARAM_IFMARKER_EN:
+ sscanf(buf, "%d", &value);
+ if (value) {
+ iser_err("IFMarker wasn't negotiated to No\n");
+ return -EPROTO;
+ }
+ break;
+ case ISCSI_PARAM_OFMARKER_EN:
+ sscanf(buf, "%d", &value);
+ if (value) {
+ iser_err("OFMarker wasn't negotiated to No\n");
+ return -EPROTO;
+ }
+ break;
+ default:
+ return iscsi_set_param(cls_conn, param, buf, buflen);
+ }
+
+ return 0;
+}
+
+/**
+ * iscsi_iser_conn_get_stats() - get iscsi connection statistics
+ * @cls_conn: iscsi class connection
+ * @stats: iscsi stats to output
+ *
+ * Output connection statistics.
+ */
+static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt; /* always 0 */
+ stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->custom_length = 0;
+}
+
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct iser_conn *iser_conn = ep->dd_data;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!iser_conn || !iser_conn->ib_conn.cma_id)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &iser_conn->ib_conn.cma_id->route.addr.dst_addr,
+ param, buf);
+ default:
+ break;
+ }
+ return -ENOSYS;
+}
+
+/**
+ * iscsi_iser_ep_connect() - Initiate iSER connection establishment
+ * @shost: scsi_host
+ * @dst_addr: destination address
+ * @non_blocking: flag indicating whether the routine must avoid blocking
+ *
+ * Allocate an iscsi endpoint, an iser_conn structure and bind them.
+ * After that start RDMA connection establishment via rdma_cm. We
+ * don't allocate iser_conn embedded in iscsi_endpoint since during teardown
+ * the endpoint will be destroyed at ep_disconnect while iser_conn will
+ * clean up its resources asynchronously.
+ *
+ * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
+ * if it fails.
+ */
+static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ int err;
+ struct iser_conn *iser_conn;
+ struct iscsi_endpoint *ep;
+
+ ep = iscsi_create_endpoint(0);
+ if (!ep)
+ return ERR_PTR(-ENOMEM);
+
+ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
+ if (!iser_conn) {
+ err = -ENOMEM;
+ goto failure;
+ }
+
+ ep->dd_data = iser_conn;
+ iser_conn->ep = ep;
+ iser_conn_init(iser_conn);
+
+ err = iser_connect(iser_conn, NULL, dst_addr, non_blocking);
+ if (err)
+ goto failure;
+
+ return ep;
+failure:
+ iscsi_destroy_endpoint(ep);
+ return ERR_PTR(err);
+}
+
+/**
+ * iscsi_iser_ep_poll() - poll for iser connection establishment to complete
+ * @ep: iscsi endpoint (created at ep_connect)
+ * @timeout_ms: polling timeout allowed in ms.
+ *
+ * This routine boils down to waiting for up_completion signaling
+ * that cma_id got the CONNECTED event.
+ *
+ * Return: 1 if connection establishment succeeded, 0 if the timeout expired
+ * (libiscsi retry will kick in), or -1 if interrupted by a signal
+ * or, more likely, if the iser connection state transitioned to TERMINATING
+ * or DOWN during the wait period.
+ */
+static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct iser_conn *iser_conn = ep->dd_data;
+ int rc;
+
+ rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
+ msecs_to_jiffies(timeout_ms));
+ /* if conn establishment failed, return error code to iscsi */
+ if (rc == 0) {
+ mutex_lock(&iser_conn->state_mutex);
+ if (iser_conn->state == ISER_CONN_TERMINATING ||
+ iser_conn->state == ISER_CONN_DOWN)
+ rc = -1;
+ mutex_unlock(&iser_conn->state_mutex);
+ }
+
+ iser_info("iser conn %p rc = %d\n", iser_conn, rc);
+
+ if (rc > 0)
+ return 1; /* success, this is the equivalent of EPOLLOUT */
+ else if (!rc)
+ return 0; /* timeout */
+ else
+ return rc; /* signal */
+}
+
+/**
+ * iscsi_iser_ep_disconnect() - Initiate connection teardown process
+ * @ep: iscsi endpoint handle
+ *
+ * This routine does not block on completion of the iser and RDMA
+ * termination process: we either queue deferred work for iser/RDMA
+ * destruction and cleanup, or call it immediately in case we didn't pass
+ * the iscsi conn bind/start stage, so it is safe.
+ */
+static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct iser_conn *iser_conn = ep->dd_data;
+
+ iser_info("ep %p iser conn %p\n", ep, iser_conn);
+
+ mutex_lock(&iser_conn->state_mutex);
+ iser_conn_terminate(iser_conn);
+
+ /*
+ * if iser_conn and iscsi_conn are bound, we must wait for
+ * iscsi_conn_stop and flush errors completion before freeing
+ * the iser resources. Otherwise we are safe to free resources
+ * immediately.
+ */
+ if (iser_conn->iscsi_conn) {
+ INIT_WORK(&iser_conn->release_work, iser_release_work);
+ queue_work(release_wq, &iser_conn->release_work);
+ mutex_unlock(&iser_conn->state_mutex);
+ } else {
+ iser_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&iser_conn->state_mutex);
+ iser_conn_release(iser_conn);
+ }
+
+ iscsi_destroy_endpoint(ep);
+}
+
+static umode_t iser_attr_is_visible(int param_type, int param)
+{
+ switch (param_type) {
+ case ISCSI_HOST_PARAM:
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ case ISCSI_PARAM:
+ switch (param) {
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_PERSISTENT_ADDRESS:
+ case ISCSI_PARAM_PERSISTENT_PORT:
+ case ISCSI_PARAM_PING_TMO:
+ case ISCSI_PARAM_RECV_TMO:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_MAX_R2T:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_FIRST_BURST:
+ case ISCSI_PARAM_MAX_BURST:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_TARGET_NAME:
+ case ISCSI_PARAM_TPGT:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_FAST_ABORT:
+ case ISCSI_PARAM_ABORT_TMO:
+ case ISCSI_PARAM_LU_RESET_TMO:
+ case ISCSI_PARAM_TGT_RESET_TMO:
+ case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static struct scsi_host_template iscsi_iser_sht = {
+ .module = THIS_MODULE,
+ .name = "iSCSI Initiator over iSER",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = scsi_change_queue_depth,
+ .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
+ .cmd_per_lun = ISER_DEF_CMD_PER_LUN,
+ .eh_timed_out = iscsi_eh_cmd_timed_out,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
+ .target_alloc = iscsi_target_alloc,
+ .proc_name = "iscsi_iser",
+ .this_id = -1,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
+};
+
+static struct iscsi_transport iscsi_iser_transport = {
+ .owner = THIS_MODULE,
+ .name = "iser",
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+ .destroy_session = iscsi_iser_session_destroy,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
+ .destroy_conn = iscsi_conn_teardown,
+ .attr_is_visible = iser_attr_is_visible,
+ .set_param = iscsi_iser_set_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+ .stop_conn = iscsi_iser_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+ .init_task = iscsi_iser_task_init,
+ .xmit_task = iscsi_iser_task_xmit,
+ .cleanup_task = iscsi_iser_cleanup_task,
+ .alloc_pdu = iscsi_iser_pdu_alloc,
+ .check_protection = iscsi_iser_check_protection,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+ .ep_connect = iscsi_iser_ep_connect,
+ .ep_poll = iscsi_iser_ep_poll,
+ .ep_disconnect = iscsi_iser_ep_disconnect
+};
+
+static int __init iser_init(void)
+{
+ int err;
+
+ iser_dbg("Starting iSER datamover...\n");
+
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+ sizeof(struct iser_tx_desc),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (ig.desc_cache == NULL)
+ return -ENOMEM;
+
+ /* device init is called only after the first addr resolution */
+ mutex_init(&ig.device_list_mutex);
+ INIT_LIST_HEAD(&ig.device_list);
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+ release_wq = alloc_workqueue("release workqueue", 0, 0);
+ if (!release_wq) {
+ iser_err("failed to allocate release workqueue\n");
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
+ iscsi_iser_scsi_transport = iscsi_register_transport(
+ &iscsi_iser_transport);
+ if (!iscsi_iser_scsi_transport) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto err_reg;
+ }
+
+ return 0;
+
+err_reg:
+ destroy_workqueue(release_wq);
+err_alloc_wq:
+ kmem_cache_destroy(ig.desc_cache);
+
+ return err;
+}
+
+static void __exit iser_exit(void)
+{
+ struct iser_conn *iser_conn, *n;
+ int connlist_empty;
+
+ iser_dbg("Removing iSER datamover...\n");
+ destroy_workqueue(release_wq);
+
+ mutex_lock(&ig.connlist_mutex);
+ connlist_empty = list_empty(&ig.connlist);
+ mutex_unlock(&ig.connlist_mutex);
+
+ if (!connlist_empty) {
+ iser_err("Error cleanup stage completed but we still have iser "
+ "connections, destroying them anyway\n");
+ list_for_each_entry_safe(iser_conn, n, &ig.connlist,
+ conn_list) {
+ iser_conn_release(iser_conn);
+ }
+ }
+
+ iscsi_unregister_transport(&iscsi_iser_transport);
+ kmem_cache_destroy(ig.desc_cache);
+}
+
+module_init(iser_init);
+module_exit(iser_exit);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
new file mode 100644
index 000000000..d967d5532
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -0,0 +1,574 @@
+/*
+ * iSER transport for the Open iSCSI Initiator & iSER transport internals
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 Mike Christie
+ * based on code maintained by open-iscsi@googlegroups.com
+ *
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ISCSI_ISER_H__
+#define __ISCSI_ISER_H__
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/printk.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/iser.h>
+
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/mempool.h>
+#include <linux/uio.h>
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+
+#define DRV_NAME "iser"
+#define PFX DRV_NAME ": "
+#define DRV_VER "1.6"
+
+#define iser_dbg(fmt, arg...) \
+ do { \
+ if (unlikely(iser_debug_level > 2)) \
+ printk(KERN_DEBUG PFX "%s: " fmt,\
+ __func__ , ## arg); \
+ } while (0)
+
+#define iser_warn(fmt, arg...) \
+ do { \
+ if (unlikely(iser_debug_level > 0)) \
+ pr_warn(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define iser_info(fmt, arg...) \
+ do { \
+ if (unlikely(iser_debug_level > 1)) \
+ pr_info(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define iser_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
+/* Default support is 512KB I/O size */
+#define ISER_DEF_MAX_SECTORS 1024
+#define ISCSI_ISER_DEF_SG_TABLESIZE \
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
+/* Maximum support is 16MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
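+
+/*
+ * Editor's note, worked through: 1024 sectors * 512 bytes = 512KB, and
+ * 512KB / 4KB pages = 128 sg entries for the default table size; the
+ * 16MB maximum (32768 sectors) similarly yields 4096 entries.
+ */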
+
+#define ISER_DEF_XMIT_CMDS_DEFAULT 512
+#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
+ #define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
+#else
+ #define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
+#endif
+#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX
+
+/* QP settings */
+/* Maximal bounds on received asynchronous PDUs */
+#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2), ASYNC_EVENT(2) */
+
+#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
+ * SCSI_TMFUNC(2), LOGOUT(1) */
+
+#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
+
+/* The max TX (send) WR supported by the iSER QP is defined by
+ * max_send_wr = T * (1 + D) + C, where T is the max number of outstanding
+ * SCSI commands, D is how many in-flight dataouts we expect per command
+ * at most, and C covers the misc (control) PDUs. The tx posting &
+ * completion handling code supports an -EAGAIN scheme where tx is
+ * suspended till the QP has room for more send WRs. D=8 comes from
+ * 64K/8K. */
+
+#define ISER_INFLIGHT_DATAOUTS 8
+
+#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
+ (1 + ISER_INFLIGHT_DATAOUTS) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+
+/* Max registration work requests per command */
+#define ISER_MAX_REG_WR_PER_CMD 5
+
+/* For Signature we don't support DATAOUTs so no need to make room for them */
+#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
+ (1 + ISER_MAX_REG_WR_PER_CMD) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+
+#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr \
+ - ISER_MAX_TX_MISC_PDUS \
+ - ISER_MAX_RX_MISC_PDUS) / \
+ (1 + ISER_INFLIGHT_DATAOUTS))
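+
+/*
+ * Editor's note, worked example (assuming ISCSI_DEF_XMIT_CMDS_MAX does not
+ * exceed the 512 default): ISER_QP_MAX_REQ_DTOS = 512 * (1 + 8) + 6 + 4 =
+ * 4618 send work requests.
+ */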
+
+/* Constant PDU lengths calculations */
+#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN 128
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+
+/* Length of an object name string */
+#define ISER_OBJECT_NAME_SIZE 64
+
+enum iser_conn_state {
+ ISER_CONN_INIT, /* descriptor allocd, no conn */
+ ISER_CONN_PENDING, /* in the process of being established */
+ ISER_CONN_UP, /* up and running */
+ ISER_CONN_TERMINATING, /* in the process of being terminated */
+ ISER_CONN_DOWN, /* shut down */
+ ISER_CONN_STATES_NUM
+};
+
+enum iser_task_status {
+ ISER_TASK_STATUS_INIT = 0,
+ ISER_TASK_STATUS_STARTED,
+ ISER_TASK_STATUS_COMPLETED
+};
+
+enum iser_data_dir {
+ ISER_DIR_IN = 0, /* to initiator */
+ ISER_DIR_OUT, /* from initiator */
+ ISER_DIRS_NUM
+};
+
+/**
+ * struct iser_data_buf - iSER data buffer
+ *
+ * @sg: pointer to the sg list
+ * @size: num entries of this sg
+ * @data_len:  total buffer byte len
+ * @dma_nents: returned by dma_map_sg
+ */
+struct iser_data_buf {
+ struct scatterlist *sg;
+ int size;
+ unsigned long data_len;
+ int dma_nents;
+};
+
+/* fwd declarations */
+struct iser_device;
+struct iscsi_iser_task;
+struct iscsi_endpoint;
+struct iser_reg_resources;
+
+/**
+ * struct iser_mem_reg - iSER memory registration info
+ *
+ * @sge: memory region sg element
+ * @rkey: memory region remote key
+ * @desc: pointer to fast registration context
+ */
+struct iser_mem_reg {
+ struct ib_sge sge;
+ u32 rkey;
+ struct iser_fr_desc *desc;
+};
+
+enum iser_desc_type {
+	ISCSI_TX_CONTROL,
+ ISCSI_TX_SCSI_COMMAND,
+ ISCSI_TX_DATAOUT
+};
+
+/**
+ * struct iser_tx_desc - iSER TX descriptor
+ *
+ * @iser_header: iser header
+ * @iscsi_header: iscsi header
+ * @type: command/control/dataout
+ * @dma_addr: header buffer dma_address
+ * @tx_sg: sg[0] points to iser/iscsi headers
+ *              sg[1] optionally points to either immediate data,
+ *              unsolicited data-out or control data
+ * @num_sge:    number of sges used on this TX task
+ * @cqe: completion handler
+ * @mapped: Is the task header mapped
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
+ */
+struct iser_tx_desc {
+ struct iser_ctrl iser_header;
+ struct iscsi_hdr iscsi_header;
+ enum iser_desc_type type;
+ u64 dma_addr;
+ struct ib_sge tx_sg[2];
+ int num_sge;
+ struct ib_cqe cqe;
+ bool mapped;
+ struct ib_reg_wr reg_wr;
+ struct ib_send_wr send_wr;
+ struct ib_send_wr inv_wr;
+};
+
+#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
+ sizeof(u64) + sizeof(struct ib_sge) + \
+ sizeof(struct ib_cqe)))
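+/* ISER_RX_PAD_SIZE pads struct iser_rx_desc (which is __packed) to exactly
+ * 256 bytes, so each entry in the rx descriptor array occupies a fixed
+ * 256-byte stride.
+ */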
+/**
+ * struct iser_rx_desc - iSER RX descriptor
+ *
+ * @iser_header: iser header
+ * @iscsi_header: iscsi header
+ * @data: received data segment
+ * @dma_addr: receive buffer dma address
+ * @rx_sg: ib_sge of receive buffer
+ * @cqe: completion handler
+ * @pad: for sense data (TODO: modify to the maximum supported sense length)
+ */
+struct iser_rx_desc {
+ struct iser_ctrl iser_header;
+ struct iscsi_hdr iscsi_header;
+ char data[ISER_RECV_DATA_SEG_LEN];
+ u64 dma_addr;
+ struct ib_sge rx_sg;
+ struct ib_cqe cqe;
+ char pad[ISER_RX_PAD_SIZE];
+} __packed;
+
+/**
+ * struct iser_login_desc - iSER login descriptor
+ *
+ * @req: pointer to login request buffer
+ * @rsp: pointer to login response buffer
+ * @req_dma: DMA address of login request buffer
+ * @rsp_dma: DMA address of login response buffer
+ * @sge: IB sge for login post recv
+ * @cqe: completion handler
+ */
+struct iser_login_desc {
+ void *req;
+ void *rsp;
+ u64 req_dma;
+ u64 rsp_dma;
+ struct ib_sge sge;
+ struct ib_cqe cqe;
+} __packed;
+
+struct iser_conn;
+struct ib_conn;
+
+/**
+ * struct iser_device - iSER device handle
+ *
+ * @ib_device: RDMA device
+ * @pd: Protection Domain for this device
+ * @event_handler: IB events handler routine
+ * @ig_list: entry in devices list
+ * @refcount: Reference counter, incremented per open iser connection
+ */
+struct iser_device {
+ struct ib_device *ib_device;
+ struct ib_pd *pd;
+ struct ib_event_handler event_handler;
+ struct list_head ig_list;
+ int refcount;
+};
+
+/**
+ * struct iser_reg_resources - Fast registration resources
+ *
+ * @mr: memory region
+ * @sig_mr: signature memory region
+ */
+struct iser_reg_resources {
+ struct ib_mr *mr;
+ struct ib_mr *sig_mr;
+};
+
+/**
+ * struct iser_fr_desc - Fast registration descriptor
+ *
+ * @list: entry in connection fastreg pool
+ * @rsc: data buffer registration resources
+ * @sig_protected: indicates the region is signature (T10-PI) protected
+ * @all_list: entry in the connection's all-descriptors list
+ */
+struct iser_fr_desc {
+ struct list_head list;
+ struct iser_reg_resources rsc;
+ bool sig_protected;
+ struct list_head all_list;
+};
+
+/**
+ * struct iser_fr_pool - connection fast registration pool
+ *
+ * @list: list of fastreg descriptors
+ * @lock: protects fastreg pool
+ * @size: size of the pool
+ * @all_list: list of all descriptors, both free and in use
+ */
+struct iser_fr_pool {
+ struct list_head list;
+ spinlock_t lock;
+ int size;
+ struct list_head all_list;
+};
+
+/**
+ * struct ib_conn - Infiniband related objects
+ *
+ * @cma_id: rdma_cm connection manager handle
+ * @qp: Connection Queue-pair
+ * @cq: Connection completion queue
+ * @cq_size: maximum number of outstanding completions
+ * @device: reference to iser device
+ * @fr_pool: connection fast registration pool
+ * @pi_support: indicates device T10-PI support
+ * @reg_cqe: completion handler
+ */
+struct ib_conn {
+ struct rdma_cm_id *cma_id;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ u32 cq_size;
+ struct iser_device *device;
+ struct iser_fr_pool fr_pool;
+ bool pi_support;
+ struct ib_cqe reg_cqe;
+};
+
+/**
+ * struct iser_conn - iSER connection context
+ *
+ * @ib_conn: connection RDMA resources
+ * @iscsi_conn: link to matching iscsi connection
+ * @ep: transport handle
+ * @state: connection logical state
+ * @qp_max_recv_dtos: maximum number of data outs, corresponds
+ * to max number of post recvs
+ * @max_cmds: maximum cmds allowed for this connection
+ * @name: connection peer portal
+ * @release_work: deferred work for the release job
+ * @state_mutex: protects iser connection state
+ * @stop_completion: conn_stop completion
+ * @ib_completion: RDMA cleanup completion
+ * @up_completion: connection establishment completed
+ * (state is ISER_CONN_UP)
+ * @conn_list: entry in ig conn list
+ * @login_desc: login descriptor
+ * @rx_descs: rx buffers array (cyclic buffer)
+ * @num_rx_descs: number of rx descriptors
+ * @scsi_sg_tablesize: scsi host sg_tablesize
+ * @pages_per_mr: maximum pages available for registration
+ * @snd_w_inv: connection uses remote invalidation
+ */
+struct iser_conn {
+ struct ib_conn ib_conn;
+ struct iscsi_conn *iscsi_conn;
+ struct iscsi_endpoint *ep;
+ enum iser_conn_state state;
+ unsigned qp_max_recv_dtos;
+ u16 max_cmds;
+ char name[ISER_OBJECT_NAME_SIZE];
+ struct work_struct release_work;
+ struct mutex state_mutex;
+ struct completion stop_completion;
+ struct completion ib_completion;
+ struct completion up_completion;
+ struct list_head conn_list;
+ struct iser_login_desc login_desc;
+ struct iser_rx_desc *rx_descs;
+ u32 num_rx_descs;
+ unsigned short scsi_sg_tablesize;
+ unsigned short pages_per_mr;
+ bool snd_w_inv;
+};
+
+/**
+ * struct iscsi_iser_task - iser task context
+ *
+ * @desc: TX descriptor
+ * @iser_conn: link to iser connection
+ * @status: current task status
+ * @sc: link to scsi command
+ * @command_sent: indicate if command was sent
+ * @dir: iser data direction
+ * @rdma_reg: task rdma registration desc
+ * @data: iser data buffer desc
+ * @prot: iser protection buffer desc
+ */
+struct iscsi_iser_task {
+ struct iser_tx_desc desc;
+ struct iser_conn *iser_conn;
+ enum iser_task_status status;
+ struct scsi_cmnd *sc;
+ int command_sent;
+ int dir[ISER_DIRS_NUM];
+ struct iser_mem_reg rdma_reg[ISER_DIRS_NUM];
+ struct iser_data_buf data[ISER_DIRS_NUM];
+ struct iser_data_buf prot[ISER_DIRS_NUM];
+};
+
+/**
+ * struct iser_global - iSER global context
+ *
+ * @device_list_mutex: protects device_list
+ * @device_list: iser devices global list
+ * @connlist_mutex: protects connlist
+ * @connlist: iser connections global list
+ * @desc_cache: kmem cache for tx dataout
+ */
+struct iser_global {
+ struct mutex device_list_mutex;
+ struct list_head device_list;
+ struct mutex connlist_mutex;
+ struct list_head connlist;
+ struct kmem_cache *desc_cache;
+};
+
+extern struct iser_global ig;
+extern int iser_debug_level;
+extern bool iser_pi_enable;
+extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
+
+int iser_send_control(struct iscsi_conn *conn,
+ struct iscsi_task *task);
+
+int iser_send_command(struct iscsi_conn *conn,
+ struct iscsi_task *task);
+
+int iser_send_data_out(struct iscsi_conn *conn,
+ struct iscsi_task *task,
+ struct iscsi_data *hdr);
+
+void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+void iser_conn_init(struct iser_conn *iser_conn);
+
+void iser_conn_release(struct iser_conn *iser_conn);
+
+int iser_conn_terminate(struct iser_conn *iser_conn);
+
+void iser_release_work(struct work_struct *work);
+
+void iser_err_comp(struct ib_wc *wc, const char *type);
+void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
+void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
+
+void iser_task_rdma_init(struct iscsi_iser_task *task);
+
+void iser_task_rdma_finalize(struct iscsi_iser_task *task);
+
+void iser_free_rx_descriptors(struct iser_conn *iser_conn);
+
+void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ enum iser_data_dir cmd_dir);
+
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
+
+int iser_connect(struct iser_conn *iser_conn,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr,
+ int non_blocking);
+
+int iser_post_recvl(struct iser_conn *iser_conn);
+int iser_post_recvm(struct iser_conn *iser_conn,
+ struct iser_rx_desc *rx_desc);
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
+
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc);
+int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+ struct iscsi_session *session);
+int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size);
+void iser_free_fastreg_pool(struct ib_conn *ib_conn);
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir, sector_t *sector);
+
+static inline struct iser_conn *
+to_iser_conn(struct ib_conn *ib_conn)
+{
+ return container_of(ib_conn, struct iser_conn, ib_conn);
+}
+
+static inline struct iser_rx_desc *
+iser_rx(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_rx_desc, cqe);
+}
+
+static inline struct iser_tx_desc *
+iser_tx(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_tx_desc, cqe);
+}
+
+static inline struct iser_login_desc *
+iser_login(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_login_desc, cqe);
+}
+
+#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
new file mode 100644
index 000000000..8ec470c51
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -0,0 +1,746 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/kfifo.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#include "iscsi_iser.h"
+
+/* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_IN].data_len; protection size
+ * is stored in task->prot[ISER_DIR_IN].data_len
+ */
+static int iser_prepare_read_cmd(struct iscsi_task *task)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_mem_reg *mem_reg;
+ int err;
+ struct iser_ctrl *hdr = &iser_task->desc.iser_header;
+
+ err = iser_dma_map_task_data(iser_task,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ goto out_err;
+ }
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->read_va = cpu_to_be64(mem_reg->sge.addr);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+ task->itt, mem_reg->rkey,
+ (unsigned long long)mem_reg->sge.addr);
+
+ return 0;
+
+out_err:
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
+ return err;
+}
+
+/* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_OUT].data_len; protection size
+ * is stored in task->prot[ISER_DIR_OUT].data_len
+ */
+static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
+ unsigned int unsol_sz, unsigned int edtl)
+{
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_mem_reg *mem_reg;
+ int err;
+ struct iser_ctrl *hdr = &iser_task->desc.iser_header;
+ struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
+ struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
+
+ err = iser_dma_map_task_data(iser_task,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+ buf_out->data_len == imm_sz);
+ if (err) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ goto out_err;
+ }
+
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
+
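+ /* Advertise the write stag/VA only when the target must RDMA-Read
+ * part of the data, i.e. when the expected data transfer length
+ * exceeds what is sent as immediate + unsolicited data-out.
+ */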
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+ if (buf_out->data_len > imm_sz) {
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ }
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
+ task->itt, mem_reg->rkey,
+ (unsigned long long)mem_reg->sge.addr, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+ task->itt, imm_sz);
+ tx_dsg->addr = mem_reg->sge.addr;
+ tx_dsg->length = imm_sz;
+ tx_dsg->lkey = mem_reg->sge.lkey;
+ iser_task->desc.num_sge = 2;
+ }
+
+ return 0;
+
+out_err:
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
+ return err;
+}
+
+/* creates a new tx descriptor and adds header regd buffer */
+static void iser_create_send_desc(struct iser_conn *iser_conn,
+ struct iser_tx_desc *tx_desc)
+{
+ struct iser_device *device = iser_conn->ib_conn.device;
+
+ ib_dma_sync_single_for_cpu(device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
+ tx_desc->iser_header.flags = ISER_VER;
+ tx_desc->num_sge = 1;
+}
+
+static void iser_free_login_buf(struct iser_conn *iser_conn)
+{
+ struct iser_device *device = iser_conn->ib_conn.device;
+ struct iser_login_desc *desc = &iser_conn->login_desc;
+
+ if (!desc->req)
+ return;
+
+ ib_dma_unmap_single(device->ib_device, desc->req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+ ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+ kfree(desc->req);
+ kfree(desc->rsp);
+
+ /* make sure we never redo any unmapping */
+ desc->req = NULL;
+ desc->rsp = NULL;
+}
+
+static int iser_alloc_login_buf(struct iser_conn *iser_conn)
+{
+ struct iser_device *device = iser_conn->ib_conn.device;
+ struct iser_login_desc *desc = &iser_conn->login_desc;
+
+ desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!desc->req)
+ return -ENOMEM;
+
+ desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device,
+ desc->req_dma))
+ goto free_req;
+
+ desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!desc->rsp)
+ goto unmap_req;
+
+ desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
+ ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device,
+ desc->rsp_dma))
+ goto free_rsp;
+
+ return 0;
+
+free_rsp:
+ kfree(desc->rsp);
+unmap_req:
+ ib_dma_unmap_single(device->ib_device, desc->req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_TO_DEVICE);
+free_req:
+ kfree(desc->req);
+
+ return -ENOMEM;
+}
+
+int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+ struct iscsi_session *session)
+{
+ int i, j;
+ u64 dma_addr;
+ struct iser_rx_desc *rx_desc;
+ struct ib_sge *rx_sg;
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+
+ iser_conn->qp_max_recv_dtos = session->cmds_max;
+
+ if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+ iser_conn->pages_per_mr))
+ goto create_rdma_reg_res_failed;
+
+ if (iser_alloc_login_buf(iser_conn))
+ goto alloc_login_buf_fail;
+
+ iser_conn->num_rx_descs = session->cmds_max;
+ iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
+ sizeof(struct iser_rx_desc),
+ GFP_KERNEL);
+ if (!iser_conn->rx_descs)
+ goto rx_desc_alloc_fail;
+
+ rx_desc = iser_conn->rx_descs;
+
+ for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ goto rx_desc_dma_map_failed;
+
+ rx_desc->dma_addr = dma_addr;
+ rx_desc->cqe.done = iser_task_rsp;
+ rx_sg = &rx_desc->rx_sg;
+ rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+ rx_sg->lkey = device->pd->local_dma_lkey;
+ }
+
+ return 0;
+
+rx_desc_dma_map_failed:
+ rx_desc = iser_conn->rx_descs;
+ for (j = 0; j < i; j++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(iser_conn->rx_descs);
+ iser_conn->rx_descs = NULL;
+rx_desc_alloc_fail:
+ iser_free_login_buf(iser_conn);
+alloc_login_buf_fail:
+ iser_free_fastreg_pool(ib_conn);
+create_rdma_reg_res_failed:
+ iser_err("failed allocating rx descriptors / data buffers\n");
+ return -ENOMEM;
+}
+
+void iser_free_rx_descriptors(struct iser_conn *iser_conn)
+{
+ int i;
+ struct iser_rx_desc *rx_desc;
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+
+ iser_free_fastreg_pool(ib_conn);
+
+ rx_desc = iser_conn->rx_descs;
+ for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(iser_conn->rx_descs);
+ /* make sure we never redo any unmapping */
+ iser_conn->rx_descs = NULL;
+
+ iser_free_login_buf(iser_conn);
+}
+
+static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
+{
+ struct iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ int err = 0;
+ int i;
+
+ iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
+ /* check if this is the last login - going to full feature phase */
+ if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
+ goto out;
+
+ if (session->discovery_sess) {
+ iser_info("Discovery session, re-using login RX buffer\n");
+ goto out;
+ }
+
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ iser_conn->qp_max_recv_dtos - 1);
+
+ /*
+ * Initial post receive buffers.
+ * There is one already posted recv buffer (for the last login
+ * response). Therefore, the first recv buffer is skipped here.
+ */
+ for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
+ err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
+ if (err)
+ goto out;
+ }
+out:
+ return err;
+}
+
+/**
+ * iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ */
+int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ unsigned long edtl;
+ int err;
+ struct iser_data_buf *data_buf, *prot_buf;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+ struct scsi_cmnd *sc = task->sc;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ tx_desc->type = ISCSI_TX_SCSI_COMMAND;
+ tx_desc->cqe.done = iser_cmd_comp;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ data_buf = &iser_task->data[ISER_DIR_IN];
+ prot_buf = &iser_task->prot[ISER_DIR_IN];
+ } else {
+ data_buf = &iser_task->data[ISER_DIR_OUT];
+ prot_buf = &iser_task->prot[ISER_DIR_OUT];
+ }
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->sg = scsi_sglist(sc);
+ data_buf->size = scsi_sg_count(sc);
+ }
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (scsi_prot_sg_count(sc)) {
+ prot_buf->sg = scsi_prot_sglist(sc);
+ prot_buf->size = scsi_prot_sg_count(sc);
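+ /* one 8-byte T10-PI tuple per logical sector */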
+ prot_buf->data_len = (data_buf->data_len >>
+ ilog2(sc->device->sector_size)) * 8;
+ }
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ err = iser_prepare_read_cmd(task);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+ err = iser_prepare_write_cmd(task,
+ task->imm_count,
+ task->imm_count +
+ task->unsol_r2t.data_length,
+ edtl);
+ if (err)
+ goto send_command_error;
+ }
+
+ iser_task->status = ISER_TASK_STATUS_STARTED;
+
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
+ if (!err)
+ return 0;
+
+send_command_error:
+ iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
+ return err;
+}
+
+/**
+ * iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
+ */
+int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
+ struct iscsi_data *hdr)
+{
+ struct iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_tx_desc *tx_desc;
+ struct iser_mem_reg *mem_reg;
+ unsigned long buf_offset;
+ unsigned long data_seg_len;
+ uint32_t itt;
+ int err;
+ struct ib_sge *tx_dsg;
+
+ itt = (__force uint32_t)hdr->itt;
+ data_seg_len = ntoh24(hdr->dlength);
+ buf_offset = ntohl(hdr->offset);
+
+ iser_dbg("%s itt %d dseg_len %d offset %d\n",
+ __func__, (int)itt, (int)data_seg_len, (int)buf_offset);
+
+ tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
+ if (!tx_desc)
+ return -ENOMEM;
+
+ tx_desc->type = ISCSI_TX_DATAOUT;
+ tx_desc->cqe.done = iser_dataout_comp;
+ tx_desc->iser_header.flags = ISER_VER;
+ memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
+
+ /* build the tx desc */
+ err = iser_initialize_task_headers(task, tx_desc);
+ if (err)
+ goto send_data_out_error;
+
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
+ tx_dsg = &tx_desc->tx_sg[1];
+ tx_dsg->addr = mem_reg->sge.addr + buf_offset;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = mem_reg->sge.lkey;
+ tx_desc->num_sge = 2;
+
+ if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+ iser_task->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+ iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
+ itt, buf_offset, data_seg_len);
+
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
+ if (!err)
+ return 0;
+
+send_data_out_error:
+ kmem_cache_free(ig.desc_cache, tx_desc);
+ iser_err("conn %p failed err %d\n", conn, err);
+ return err;
+}
+
+int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_tx_desc *mdesc = &iser_task->desc;
+ unsigned long data_seg_len;
+ int err = 0;
+ struct iser_device *device;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ mdesc->cqe.done = iser_ctrl_comp;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn.device;
+
+ data_seg_len = ntoh24(task->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ struct iser_login_desc *desc = &iser_conn->login_desc;
+ struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
+
+ if (task != conn->login_task) {
+ iser_err("data present on non login task!!!\n");
+ goto send_control_error;
+ }
+
+ ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
+ task->data_count, DMA_TO_DEVICE);
+
+ memcpy(desc->req, task->data, task->data_count);
+
+ ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
+ task->data_count, DMA_TO_DEVICE);
+
+ tx_dsg->addr = desc->req_dma;
+ tx_dsg->length = task->data_count;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
+ mdesc->num_sge = 2;
+ }
+
+ if (task == conn->login_task) {
+ iser_dbg("op %x dsl %lx, posting login rx buffer\n",
+ task->hdr->opcode, data_seg_len);
+ err = iser_post_recvl(iser_conn);
+ if (err)
+ goto send_control_error;
+ err = iser_post_rx_bufs(conn, task->hdr);
+ if (err)
+ goto send_control_error;
+ }
+
+ err = iser_post_send(&iser_conn->ib_conn, mdesc);
+ if (!err)
+ return 0;
+
+send_control_error:
+ iser_err("conn %p failed err %d\n",conn, err);
+ return err;
+}
+
+void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_conn *ib_conn = wc->qp->qp_context;
+ struct iser_conn *iser_conn = to_iser_conn(ib_conn);
+ struct iser_login_desc *desc = iser_login(wc->wr_cqe);
+ struct iscsi_hdr *hdr;
+ char *data;
+ int length;
+ bool full_feature_phase;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ iser_err_comp(wc, "login_rsp");
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
+ desc->rsp_dma, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+
+ hdr = desc->rsp + sizeof(struct iser_ctrl);
+ data = desc->rsp + ISER_HEADERS_LEN;
+ length = wc->byte_len - ISER_HEADERS_LEN;
+ full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+ ISCSI_FULL_FEATURE_PHASE) &&
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL);
+
+ iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+ hdr->itt, length);
+
+ iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);
+
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ desc->rsp_dma, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (!full_feature_phase ||
+ iser_conn->iscsi_conn->session->discovery_sess)
+ return;
+
+ /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
+ iser_post_recvm(iser_conn, iser_conn->rx_descs);
+}
+
+static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+{
+ if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
+ (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
+ iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
+ return -EINVAL;
+ }
+
+ if (desc->sig_protected)
+ desc->rsc.sig_mr->need_inval = false;
+ else
+ desc->rsc.mr->need_inval = false;
+
+ return 0;
+}
+
+static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
+ struct iscsi_hdr *hdr)
+{
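+ /* With SEND-with-Invalidate negotiated (snd_w_inv), the target may
+ * invalidate our rkey remotely; verify the invalidated rkey matches
+ * an MR registered for this task so the local invalidate is skipped.
+ */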
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+ struct iscsi_task *task;
+ u32 rkey = wc->ex.invalidate_rkey;
+
+ iser_dbg("conn %p: remote invalidation for rkey %#x\n",
+ iser_conn, rkey);
+
+ if (unlikely(!iser_conn->snd_w_inv)) {
+ iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
+ iser_conn);
+ return -EPROTO;
+ }
+
+ task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
+ if (likely(task)) {
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_fr_desc *desc;
+
+ if (iser_task->dir[ISER_DIR_IN]) {
+ desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
+ }
+
+ if (iser_task->dir[ISER_DIR_OUT]) {
+ desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
+ }
+ } else {
+ iser_err("failed to get task for itt=%d\n", hdr->itt);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_conn *ib_conn = wc->qp->qp_context;
+ struct iser_conn *iser_conn = to_iser_conn(ib_conn);
+ struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
+ struct iscsi_hdr *hdr;
+ int length, err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ iser_err_comp(wc, "task_rsp");
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
+ desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
+ DMA_FROM_DEVICE);
+
+ hdr = &desc->iscsi_header;
+ length = wc->byte_len - ISER_HEADERS_LEN;
+
+ iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+ hdr->itt, length);
+
+ if (iser_check_remote_inv(iser_conn, wc, hdr)) {
+ iscsi_conn_failure(iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ return;
+ }
+
+ iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);
+
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
+ DMA_FROM_DEVICE);
+
+ err = iser_post_recvm(iser_conn, desc);
+ if (err)
+ iser_err("posting rx buffer err %d\n", err);
+}
+
+void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ iser_err_comp(wc, "command");
+}
+
+void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
+ struct iscsi_task *task;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ iser_err_comp(wc, "control");
+ return;
+ }
+
+ /* This arithmetic is legal: libiscsi allocates the iscsi_task and
+ * its dd_data (which holds this tx descriptor) contiguously, so
+ * stepping back sizeof(struct iscsi_task) recovers the task.
+ */
+ task = (void *)desc - sizeof(struct iscsi_task);
+ if (task->hdr->itt == RESERVED_ITT)
+ iscsi_put_task(task);
+}
+
+void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
+ struct ib_conn *ib_conn = wc->qp->qp_context;
+ struct iser_device *device = ib_conn->device;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ iser_err_comp(wc, "dataout");
+
+ ib_dma_unmap_single(device->ib_device, desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ kmem_cache_free(ig.desc_cache, desc);
+}
+
+void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
+{
+ iser_task->status = ISER_TASK_STATUS_INIT;
+
+ iser_task->dir[ISER_DIR_IN] = 0;
+ iser_task->dir[ISER_DIR_OUT] = 0;
+
+ iser_task->data[ISER_DIR_IN].data_len = 0;
+ iser_task->data[ISER_DIR_OUT].data_len = 0;
+
+ iser_task->prot[ISER_DIR_IN].data_len = 0;
+ iser_task->prot[ISER_DIR_OUT].data_len = 0;
+
+ iser_task->prot[ISER_DIR_IN].dma_nents = 0;
+ iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
+
+ memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
+ sizeof(struct iser_mem_reg));
+ memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
+ sizeof(struct iser_mem_reg));
+}
+
+void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+{
+ if (iser_task->dir[ISER_DIR_IN]) {
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ }
+
+ if (iser_task->dir[ISER_DIR_OUT]) {
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
+ iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ }
+}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
new file mode 100644
index 000000000..6efcb79c8
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+#include "iscsi_iser.h"
+
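+/* Registration WRs are posted with send_flags = 0 (unsignaled), so a
+ * completion reaches this handler only on the error/flush path.
+ */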
+void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
+{
+ iser_err_comp(wc, "memreg");
+}
+
+static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
+{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fr_pool->lock, flags);
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+ list_del(&desc->list);
+ spin_unlock_irqrestore(&fr_pool->lock, flags);
+
+ return desc;
+}
+
+static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
+{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fr_pool->lock, flags);
+ list_add(&desc->list, &fr_pool->list);
+ spin_unlock_irqrestore(&fr_pool->lock, flags);
+}
+
+int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
+{
+ struct iser_data_buf *data = &iser_task->data[iser_dir];
+ struct ib_device *dev;
+
+ iser_task->dir[iser_dir] = 1;
+ dev = iser_task->iser_conn->ib_conn.device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
+ if (unlikely(data->dma_nents == 0)) {
+ iser_err("dma_map_sg failed!!!\n");
+ return -EINVAL;
+ }
+
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
+
+ pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
+ if (unlikely(pdata->dma_nents == 0)) {
+ iser_err("protection dma_map_sg failed!!!\n");
+ goto out_unmap;
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
+ return -EINVAL;
+}
+
+void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
+{
+ struct iser_data_buf *data = &iser_task->data[iser_dir];
+ struct ib_device *dev;
+
+ dev = iser_task->iser_conn->ib_conn.device->ib_device;
+ ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
+
+ if (scsi_prot_sg_count(iser_task->sc)) {
+ struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
+
+ ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
+ }
+}
+
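+/* Fast path for a single-entry SG list: reuse the PD's local_dma_lkey
+ * (and, when enabled, its unsafe global rkey) instead of posting a fast
+ * registration WR.
+ */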
+static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
+{
+ struct scatterlist *sg = mem->sg;
+
+ reg->sge.lkey = device->pd->local_dma_lkey;
+ /*
+ * FIXME: rework the registration code path to differentiate
+ * rkey/lkey use cases
+ */
+
+ if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ reg->rkey = device->pd->unsafe_global_rkey;
+ else
+ reg->rkey = 0;
+ reg->sge.addr = sg_dma_address(&sg[0]);
+ reg->sge.length = sg_dma_len(&sg[0]);
+
+ iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+ " length=0x%x\n", reg->sge.lkey, reg->rkey,
+ reg->sge.addr, reg->sge.length);
+
+ return 0;
+}
+
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ struct iser_fr_desc *desc;
+ struct ib_mr_status mr_status;
+
+ desc = reg->desc;
+ if (!desc)
+ return;
+
+ /*
+ * The signature MR cannot be invalidated and reused without checking.
+ * libiscsi calls the check_protection transport handler only if
+ * SCSI-Response is received. And the signature MR is not checked if
+ * the task is completed for some other reason like a timeout or error
+ * handling. That's why we must check the signature MR here before
+ * putting it to the free pool.
+ */
+ if (unlikely(desc->sig_protected)) {
+ desc->sig_protected = false;
+ ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+ &mr_status);
+ }
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
+ reg->desc = NULL;
+}
+
+static void iser_set_dif_domain(struct scsi_cmnd *sc,
+ struct ib_sig_domain *domain)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.pi_interval = scsi_prot_interval(sc);
+ domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
+ /*
+ * At the moment we hard code those, but in the future
+ * we will take them from sc.
+ */
+ domain->sig.dif.apptag_check_mask = 0xffff;
+ domain->sig.dif.app_escape = true;
+ domain->sig.dif.ref_escape = true;
+ if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
+ domain->sig.dif.ref_remap = true;
+}
+
+static int iser_set_sig_attrs(struct scsi_cmnd *sc,
+ struct ib_sig_attrs *sig_attrs)
+{
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+ iser_set_dif_domain(sc, &sig_attrs->wire);
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+ iser_set_dif_domain(sc, &sig_attrs->mem);
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ iser_set_dif_domain(sc, &sig_attrs->wire);
+ sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+ iser_set_dif_domain(sc, &sig_attrs->mem);
+ sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
+ IB_T10DIF_CSUM : IB_T10DIF_CRC;
+ break;
+ default:
+ iser_err("Unsupported PI operation %d\n",
+ scsi_get_prot_op(sc));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+{
+ *mask = 0;
+ if (sc->prot_flags & SCSI_PROT_REF_CHECK)
+ *mask |= IB_SIG_CHECK_REFTAG;
+ if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
+ *mask |= IB_SIG_CHECK_GUARD;
+}
+
+static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
+ struct ib_cqe *cqe, struct ib_send_wr *next_wr)
+{
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_cqe = cqe;
+ inv_wr->ex.invalidate_rkey = mr->rkey;
+ inv_wr->send_flags = 0;
+ inv_wr->num_sge = 0;
+ inv_wr->next = next_wr;
+}
+
+static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *sig_mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *sig_reg)
+{
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
+ struct ib_mr *mr = rsc->sig_mr;
+ struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
+ int ret;
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+ ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
+ if (ret)
+ goto err;
+
+ iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
+
+ if (rsc->sig_mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+
+ ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
+ sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
+ if (unlikely(ret)) {
+ iser_err("failed to map PI sg (%d)\n",
+ mem->dma_nents + sig_mem->dma_nents);
+ goto err;
+ }
+
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.next = &tx_desc->send_wr;
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ wr->wr.wr_cqe = cqe;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ rsc->sig_mr->need_inval = true;
+
+ sig_reg->sge.lkey = mr->lkey;
+ sig_reg->rkey = mr->rkey;
+ sig_reg->sge.addr = mr->iova;
+ sig_reg->sge.length = mr->length;
+
+ iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
+ sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
+ sig_reg->sge.length);
+err:
+ return ret;
+}
+
+static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *reg)
+{
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
+ struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
+ struct ib_mr *mr = rsc->mr;
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
+ int n;
+
+ if (rsc->mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
+ if (unlikely(n != mem->dma_nents)) {
+ iser_err("failed to map sg (%d/%d)\n",
+ n, mem->dma_nents);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ wr->wr.next = &tx_desc->send_wr;
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_cqe = cqe;
+ wr->wr.send_flags = 0;
+ wr->wr.num_sge = 0;
+ wr->mr = mr;
+ wr->key = mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ rsc->mr->need_inval = true;
+
+ reg->sge.lkey = mr->lkey;
+ reg->rkey = mr->rkey;
+ reg->sge.addr = mr->iova;
+ reg->sge.length = mr->length;
+
+ iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
+ reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);
+
+ return 0;
+}
+
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm)
+{
+ struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct iser_data_buf *mem = &task->data[dir];
+ struct iser_mem_reg *reg = &task->rdma_reg[dir];
+ struct iser_fr_desc *desc;
+ bool use_dma_key;
+ int err;
+
+ use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
+ scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
+ if (use_dma_key)
+ return iser_reg_dma(device, mem, reg);
+
+ desc = iser_reg_desc_get_fr(ib_conn);
+ if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
+ err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
+ if (unlikely(err))
+ goto err_reg;
+ } else {
+ err = iser_reg_sig_mr(task, mem, &task->prot[dir],
+ &desc->rsc, reg);
+ if (unlikely(err))
+ goto err_reg;
+
+ desc->sig_protected = true;
+ }
+
+ reg->desc = desc;
+
+ return 0;
+
+err_reg:
+ iser_reg_desc_put_fr(ib_conn, desc);
+
+ return err;
+}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
new file mode 100644
index 000000000..057e69164
--- /dev/null
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "iscsi_iser.h"
+
+#define ISCSI_ISER_MAX_CONN 8
+#define ISER_MAX_RX_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_TX_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_CQ_LEN (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
+ ISCSI_ISER_MAX_CONN)
+
+static void iser_qp_event_callback(struct ib_event *cause, void *context)
+{
+ iser_err("qp event %s (%d)\n",
+ ib_event_msg(cause->event), cause->event);
+}
+
+static void iser_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ iser_err("async event %s (%d) on device %s port %d\n",
+ ib_event_msg(event->event), event->event,
+ dev_name(&event->device->dev), event->element.port_num);
+}
+
+/*
+ * iser_create_device_ib_res - allocates the Protection Domain (PD) and
+ * registers an asynchronous event handler with the device associated
+ * with the adaptor.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int iser_create_device_ib_res(struct iser_device *device)
+{
+ struct ib_device *ib_dev = device->ib_device;
+
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ iser_err("IB device does not support memory registrations\n");
+ return -1;
+ }
+
+ device->pd = ib_alloc_pd(ib_dev,
+ iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
+ if (IS_ERR(device->pd))
+ goto pd_err;
+
+ INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
+ iser_event_handler);
+ ib_register_event_handler(&device->event_handler);
+ return 0;
+
+pd_err:
+ iser_err("failed to allocate an IB resource\n");
+ return -1;
+}
+
+/*
+ * iser_free_device_ib_res - unregisters the event handler and deallocates
+ * the PD created with the device associated with the adaptor.
+ */
+static void iser_free_device_ib_res(struct iser_device *device)
+{
+ ib_unregister_event_handler(&device->event_handler);
+ ib_dealloc_pd(device->pd);
+
+ device->pd = NULL;
+}
+
+static struct iser_fr_desc *
+iser_create_fastreg_desc(struct iser_device *device,
+ struct ib_pd *pd,
+ bool pi_enable,
+ unsigned int size)
+{
+ struct iser_fr_desc *desc;
+ struct ib_device *ib_dev = device->ib_device;
+ enum ib_mr_type mr_type;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
+ desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
+ if (IS_ERR(desc->rsc.mr)) {
+ ret = PTR_ERR(desc->rsc.mr);
+ iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+ goto err_alloc_mr;
+ }
+
+ if (pi_enable) {
+ desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
+ if (IS_ERR(desc->rsc.sig_mr)) {
+ ret = PTR_ERR(desc->rsc.sig_mr);
+ iser_err("Failed to allocate sig_mr err=%d\n", ret);
+ goto err_alloc_mr_integrity;
+ }
+ }
+
+ return desc;
+
+err_alloc_mr_integrity:
+ ib_dereg_mr(desc->rsc.mr);
+err_alloc_mr:
+ kfree(desc);
+
+ return ERR_PTR(ret);
+}
+
+static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
+{
+ struct iser_reg_resources *res = &desc->rsc;
+
+ ib_dereg_mr(res->mr);
+ if (res->sig_mr) {
+ ib_dereg_mr(res->sig_mr);
+ res->sig_mr = NULL;
+ }
+ kfree(desc);
+}
+
+/**
+ * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
+ * for fast registration work requests.
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
+ */
+int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
+ unsigned cmds_max,
+ unsigned int size)
+{
+ struct iser_device *device = ib_conn->device;
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
+ int i, ret;
+
+ INIT_LIST_HEAD(&fr_pool->list);
+ INIT_LIST_HEAD(&fr_pool->all_list);
+ spin_lock_init(&fr_pool->lock);
+ fr_pool->size = 0;
+ for (i = 0; i < cmds_max; i++) {
+ desc = iser_create_fastreg_desc(device, device->pd,
+ ib_conn->pi_support, size);
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
+ goto err;
+ }
+
+ list_add_tail(&desc->list, &fr_pool->list);
+ list_add_tail(&desc->all_list, &fr_pool->all_list);
+ fr_pool->size++;
+ }
+
+ return 0;
+
+err:
+ iser_free_fastreg_pool(ib_conn);
+ return ret;
+}
+
+/**
+ * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
+ */
+void iser_free_fastreg_pool(struct ib_conn *ib_conn)
+{
+ struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc, *tmp;
+ int i = 0;
+
+ if (list_empty(&fr_pool->all_list))
+ return;
+
+ iser_info("freeing conn %p fr pool\n", ib_conn);
+
+ list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+ list_del(&desc->all_list);
+ iser_destroy_fastreg_desc(desc);
+ ++i;
+ }
+
+ if (i < fr_pool->size)
+ iser_warn("pool still has %d regions registered\n",
+ fr_pool->size - i);
+}
+
+/*
+ * iser_create_ib_conn_res - creates the connection CQ and Queue-Pair (QP)
+ *
+ * Return: 0 on success, or errno code on failure
+ */
+static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
+{
+ struct iser_conn *iser_conn = to_iser_conn(ib_conn);
+ struct iser_device *device;
+ struct ib_device *ib_dev;
+ struct ib_qp_init_attr init_attr;
+ int ret = -ENOMEM;
+ unsigned int max_send_wr, cq_size;
+
+ BUG_ON(ib_conn->device == NULL);
+
+ device = ib_conn->device;
+ ib_dev = device->ib_device;
+
+ /* +1 for drain */
+ if (ib_conn->pi_support)
+ max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
+ else
+ max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
+ max_send_wr = min_t(unsigned int, max_send_wr,
+ (unsigned int)ib_dev->attrs.max_qp_wr);
+
+ cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
+ ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
+ if (IS_ERR(ib_conn->cq)) {
+ ret = PTR_ERR(ib_conn->cq);
+ goto cq_err;
+ }
+ ib_conn->cq_size = cq_size;
+
+ memset(&init_attr, 0, sizeof(init_attr));
+
+ init_attr.event_handler = iser_qp_event_callback;
+ init_attr.qp_context = (void *)ib_conn;
+ init_attr.send_cq = ib_conn->cq;
+ init_attr.recv_cq = ib_conn->cq;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
+ init_attr.cap.max_send_sge = 2;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.cap.max_send_wr = max_send_wr;
+ if (ib_conn->pi_support)
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
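+ /* Derive the command budget from the actual send queue depth,
+ * excluding the +1 drain WR added above.
+ */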
+ iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);
+
+ ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
+ if (ret)
+ goto out_err;
+
+ ib_conn->qp = ib_conn->cma_id->qp;
+ iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
+ ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
+ return ret;
+
+out_err:
+ ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
+cq_err:
+ iser_err("unable to alloc mem or create resource, err %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Based on the resolved device node GUID, see if a device is already
+ * allocated for this node. If there is no such device, create one.
+ */
+static
+struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
+{
+ struct iser_device *device;
+
+ mutex_lock(&ig.device_list_mutex);
+
+ list_for_each_entry(device, &ig.device_list, ig_list)
+ /* find if there's a match using the node GUID */
+ if (device->ib_device->node_guid == cma_id->device->node_guid)
+ goto inc_refcnt;
+
+ device = kzalloc(sizeof *device, GFP_KERNEL);
+ if (!device)
+ goto out;
+
+ /* bind the IB device to the new iser device */
+ device->ib_device = cma_id->device;
+ /* init the device and link it into ig device list */
+ if (iser_create_device_ib_res(device)) {
+ kfree(device);
+ device = NULL;
+ goto out;
+ }
+ list_add(&device->ig_list, &ig.device_list);
+
+inc_refcnt:
+ device->refcount++;
+out:
+ mutex_unlock(&ig.device_list_mutex);
+ return device;
+}
+
+/* if there's no demand for this device, release it */
+static void iser_device_try_release(struct iser_device *device)
+{
+ mutex_lock(&ig.device_list_mutex);
+ device->refcount--;
+ iser_info("device %p refcount %d\n", device, device->refcount);
+ if (!device->refcount) {
+ iser_free_device_ib_res(device);
+ list_del(&device->ig_list);
+ kfree(device);
+ }
+ mutex_unlock(&ig.device_list_mutex);
+}
+
+/*
+ * Called with state mutex held
+ */
+static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
+ enum iser_conn_state comp,
+ enum iser_conn_state exch)
+{
+ int ret;
+
+ ret = (iser_conn->state == comp);
+ if (ret)
+ iser_conn->state = exch;
+
+ return ret;
+}
+
+void iser_release_work(struct work_struct *work)
+{
+ struct iser_conn *iser_conn;
+
+ iser_conn = container_of(work, struct iser_conn, release_work);
+
+ /* Wait for conn_stop to complete */
+ wait_for_completion(&iser_conn->stop_completion);
+ /* Wait for IB resources cleanup to complete */
+ wait_for_completion(&iser_conn->ib_completion);
+
+ mutex_lock(&iser_conn->state_mutex);
+ iser_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&iser_conn->state_mutex);
+
+ iser_conn_release(iser_conn);
+}
+
+/**
+ * iser_free_ib_conn_res - release IB related resources
+ * @iser_conn: iser connection struct
+ * @destroy: indicator if we need to try to release the
+ * iser device and memory regions pool (only iscsi
+ * shutdown and DEVICE_REMOVAL will use this).
+ *
+ * This routine is called with the iser state mutex held
+ * so the cm_id removal is out of here. It is safe to
+ * invoke this routine multiple times.
+ */
+static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+
+ iser_info("freeing conn %p cma_id %p qp %p\n",
+ iser_conn, ib_conn->cma_id, ib_conn->qp);
+
+ if (ib_conn->qp) {
+ rdma_destroy_qp(ib_conn->cma_id);
+ ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
+ ib_conn->qp = NULL;
+ }
+
+ if (destroy) {
+ if (iser_conn->rx_descs)
+ iser_free_rx_descriptors(iser_conn);
+
+ if (device) {
+ iser_device_try_release(device);
+ ib_conn->device = NULL;
+ }
+ }
+}
+
+/**
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
+ */
+void iser_conn_release(struct iser_conn *iser_conn)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+
+ mutex_lock(&ig.connlist_mutex);
+ list_del(&iser_conn->conn_list);
+ mutex_unlock(&ig.connlist_mutex);
+
+ mutex_lock(&iser_conn->state_mutex);
+ /* In case we end up here without ep_disconnect having been invoked. */
+ if (iser_conn->state != ISER_CONN_DOWN) {
+ iser_warn("iser conn %p state %d, expected state down.\n",
+ iser_conn, iser_conn->state);
+ iscsi_destroy_endpoint(iser_conn->ep);
+ iser_conn->state = ISER_CONN_DOWN;
+ }
+ /*
+ * In case we never got to bind stage, we still need to
+ * release IB resources (which is safe to call more than once).
+ */
+ iser_free_ib_conn_res(iser_conn, true);
+ mutex_unlock(&iser_conn->state_mutex);
+
+ if (ib_conn->cma_id) {
+ rdma_destroy_id(ib_conn->cma_id);
+ ib_conn->cma_id = NULL;
+ }
+
+ kfree(iser_conn);
+}
+
+/**
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
+ * Called with state mutex held
+ */
+int iser_conn_terminate(struct iser_conn *iser_conn)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ int err = 0;
+
+ /* terminate the iser conn only if the conn state is UP */
+ if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
+ ISER_CONN_TERMINATING))
+ return 0;
+
+ iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);
+
+ /* suspend queuing of new iscsi commands */
+ if (iser_conn->iscsi_conn)
+ iscsi_suspend_queue(iser_conn->iscsi_conn);
+
+ /*
+ * In case we didn't already clean up the cma_id (peer initiated
+ * a disconnection), we need to cause the CMA to change the QP
+ * state to ERROR.
+ */
+ if (ib_conn->cma_id) {
+ err = rdma_disconnect(ib_conn->cma_id);
+ if (err)
+ iser_err("Failed to disconnect, conn: 0x%p err %d\n",
+ iser_conn, err);
+
+ /* block until all flush errors are consumed */
+ ib_drain_qp(ib_conn->qp);
+ }
+
+ return 1;
+}
+
+/*
+ * Called with state mutex held
+ */
+static void iser_connect_error(struct rdma_cm_id *cma_id)
+{
+ struct iser_conn *iser_conn;
+
+ iser_conn = cma_id->context;
+ iser_conn->state = ISER_CONN_TERMINATING;
+}
+
+static void iser_calc_scsi_params(struct iser_conn *iser_conn,
+ unsigned int max_sectors)
+{
+ struct iser_device *device = iser_conn->ib_conn.device;
+ struct ib_device_attr *attr = &device->ib_device->attrs;
+ unsigned short sg_tablesize, sup_sg_tablesize;
+ unsigned short reserved_mr_pages;
+ u32 max_num_sg;
+
+ /*
+ * FRs without SG_GAPS can only map up to a (device) page per entry,
+ * but if the first entry is misaligned we'll end up using two entries
+ * (head and tail) for a single page worth data, so one additional
+ * entry is required.
+ */
+ if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
+ reserved_mr_pages = 0;
+ else
+ reserved_mr_pages = 1;
+
+ if (iser_conn->ib_conn.pi_support)
+ max_num_sg = attr->max_pi_fast_reg_page_list_len;
+ else
+ max_num_sg = attr->max_fast_reg_page_list_len;
+
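+	/*
+	 * Worked example (assuming 512-byte sectors): max_sectors = 1024
+	 * gives DIV_ROUND_UP(1024 * 512, SZ_4K) = 128 sg entries.
+	 */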
+ sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
+ sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ max_num_sg - reserved_mr_pages);
+ iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
+ iser_conn->pages_per_mr =
+ iser_conn->scsi_sg_tablesize + reserved_mr_pages;
+}
+
+/*
+ * Called with state mutex held
+ */
+static void iser_addr_handler(struct rdma_cm_id *cma_id)
+{
+ struct iser_device *device;
+ struct iser_conn *iser_conn;
+ struct ib_conn *ib_conn;
+ int ret;
+
+ iser_conn = cma_id->context;
+ if (iser_conn->state != ISER_CONN_PENDING)
+ /* bailout */
+ return;
+
+ ib_conn = &iser_conn->ib_conn;
+ device = iser_device_find_by_ib_device(cma_id);
+ if (!device) {
+ iser_err("device lookup/creation failed\n");
+ iser_connect_error(cma_id);
+ return;
+ }
+
+ ib_conn->device = device;
+
+ /* connection T10-PI support */
+ if (iser_pi_enable) {
+ if (!(device->ib_device->attrs.kernel_cap_flags &
+ IBK_INTEGRITY_HANDOVER)) {
+ iser_warn("T10-PI requested but not supported on %s, "
+ "continue without T10-PI\n",
+ dev_name(&ib_conn->device->ib_device->dev));
+ ib_conn->pi_support = false;
+ } else {
+ ib_conn->pi_support = true;
+ }
+ }
+
+ iser_calc_scsi_params(iser_conn, iser_max_sectors);
+
+ ret = rdma_resolve_route(cma_id, 1000);
+ if (ret) {
+ iser_err("resolve route failed: %d\n", ret);
+ iser_connect_error(cma_id);
+ return;
+ }
+}
+
+/*
+ * Called with state mutex held
+ */
+static void iser_route_handler(struct rdma_cm_id *cma_id)
+{
+ struct rdma_conn_param conn_param;
+ int ret;
+ struct iser_cm_hdr req_hdr;
+ struct iser_conn *iser_conn = cma_id->context;
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct ib_device *ib_dev = ib_conn->device->ib_device;
+
+ if (iser_conn->state != ISER_CONN_PENDING)
+ /* bailout */
+ return;
+
+ ret = iser_create_ib_conn_res(ib_conn);
+ if (ret)
+ goto failure;
+
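+	/*
+	 * responder_resources advertises how many incoming RDMA READs we
+	 * can service; retry_count = 7 is the IB maximum, while
+	 * rnr_retry_count = 6 avoids the special "infinite" value of 7.
+	 */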
+	memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = 7;
+ conn_param.rnr_retry_count = 6;
+
+ memset(&req_hdr, 0, sizeof(req_hdr));
+ req_hdr.flags = ISER_ZBVA_NOT_SUP;
+ if (!iser_always_reg)
+ req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
+ conn_param.private_data = (void *)&req_hdr;
+ conn_param.private_data_len = sizeof(struct iser_cm_hdr);
+
+ ret = rdma_connect_locked(cma_id, &conn_param);
+ if (ret) {
+ iser_err("failure connecting: %d\n", ret);
+ goto failure;
+ }
+
+ return;
+failure:
+ iser_connect_error(cma_id);
+}
+
+static void iser_connected_handler(struct rdma_cm_id *cma_id,
+ const void *private_data)
+{
+ struct iser_conn *iser_conn;
+ struct ib_qp_attr attr;
+ struct ib_qp_init_attr init_attr;
+
+ iser_conn = cma_id->context;
+ if (iser_conn->state != ISER_CONN_PENDING)
+ /* bailout */
+ return;
+
+ (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
+ iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
+
+ if (private_data) {
+ u8 flags = *(u8 *)private_data;
+
+ iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
+ }
+
+ iser_info("conn %p: negotiated %s invalidation\n",
+ iser_conn, iser_conn->snd_w_inv ? "remote" : "local");
+
+ iser_conn->state = ISER_CONN_UP;
+ complete(&iser_conn->up_completion);
+}
+
+static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+{
+ struct iser_conn *iser_conn = cma_id->context;
+
+ if (iser_conn_terminate(iser_conn)) {
+ if (iser_conn->iscsi_conn)
+ iscsi_conn_failure(iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
+}
+
+static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
+ bool destroy)
+{
+ struct iser_conn *iser_conn = cma_id->context;
+
+ /*
+	 * We are not guaranteed that disconnected_handler was invoked
+	 * by now, so call it here to make sure we handle the CM DREP
+	 * and flush errors.
+ */
+ iser_disconnected_handler(cma_id);
+ iser_free_ib_conn_res(iser_conn, destroy);
+ complete(&iser_conn->ib_completion);
+}
+
+static int iser_cma_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event)
+{
+ struct iser_conn *iser_conn;
+ int ret = 0;
+
+ iser_conn = cma_id->context;
+ iser_info("%s (%d): status %d conn %p id %p\n",
+ rdma_event_msg(event->event), event->event,
+ event->status, cma_id->context, cma_id);
+
+ mutex_lock(&iser_conn->state_mutex);
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ iser_addr_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ iser_route_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ iser_connected_handler(cma_id, event->param.conn.private_data);
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ iser_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
+ fallthrough;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ iser_connect_error(cma_id);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ iser_cleanup_handler(cma_id, false);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+		 * We *must* destroy the device as we cannot rely
+		 * on iscsid to be around to initiate error handling.
+		 * Also, if we are not in state DOWN, implicitly destroy
+		 * the cma_id.
+ */
+ iser_cleanup_handler(cma_id, true);
+ if (iser_conn->state != ISER_CONN_DOWN) {
+ iser_conn->ib_conn.cma_id = NULL;
+ ret = 1;
+ }
+ break;
+ default:
+ iser_err("Unexpected RDMA CM event: %s (%d)\n",
+ rdma_event_msg(event->event), event->event);
+ break;
+ }
+ mutex_unlock(&iser_conn->state_mutex);
+
+ return ret;
+}
+
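+/**
+ * iser_conn_init - initialize an iSER connection descriptor
+ * @iser_conn: iSER connection context
+ *
+ * Sets the initial state, completions, list linkage and state mutex
+ * before any CM activity takes place.
+ */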
+void iser_conn_init(struct iser_conn *iser_conn)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+
+ iser_conn->state = ISER_CONN_INIT;
+ init_completion(&iser_conn->stop_completion);
+ init_completion(&iser_conn->ib_completion);
+ init_completion(&iser_conn->up_completion);
+ INIT_LIST_HEAD(&iser_conn->conn_list);
+ mutex_init(&iser_conn->state_mutex);
+
+ ib_conn->reg_cqe.done = iser_reg_comp;
+}
+
+/*
+ * starts the process of connecting to the target
+ * sleeps until the connection is established or rejected
+ */
+int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ int err = 0;
+
+ mutex_lock(&iser_conn->state_mutex);
+
+ sprintf(iser_conn->name, "%pISp", dst_addr);
+
+ iser_info("connecting to: %s\n", iser_conn->name);
+
+ /* the device is known only --after-- address resolution */
+ ib_conn->device = NULL;
+
+ iser_conn->state = ISER_CONN_PENDING;
+
+ ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
+ iser_conn, RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(ib_conn->cma_id)) {
+ err = PTR_ERR(ib_conn->cma_id);
+ iser_err("rdma_create_id failed: %d\n", err);
+ goto id_failure;
+ }
+
+ err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
+ if (err) {
+ iser_err("rdma_resolve_addr failed: %d\n", err);
+ goto addr_failure;
+ }
+
+ if (!non_blocking) {
+ wait_for_completion_interruptible(&iser_conn->up_completion);
+
+ if (iser_conn->state != ISER_CONN_UP) {
+ err = -EIO;
+ goto connect_failure;
+ }
+ }
+ mutex_unlock(&iser_conn->state_mutex);
+
+ mutex_lock(&ig.connlist_mutex);
+ list_add(&iser_conn->conn_list, &ig.connlist);
+ mutex_unlock(&ig.connlist_mutex);
+ return 0;
+
+id_failure:
+ ib_conn->cma_id = NULL;
+addr_failure:
+ iser_conn->state = ISER_CONN_DOWN;
+connect_failure:
+ mutex_unlock(&iser_conn->state_mutex);
+ iser_conn_release(iser_conn);
+ return err;
+}
+
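+/**
+ * iser_post_recvl - post a receive buffer for the login response
+ * @iser_conn: iSER connection context
+ *
+ * Return: 0 on success, ib_post_recv() error code on failure
+ */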
+int iser_post_recvl(struct iser_conn *iser_conn)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct iser_login_desc *desc = &iser_conn->login_desc;
+ struct ib_recv_wr wr;
+ int ret;
+
+ desc->sge.addr = desc->rsp_dma;
+ desc->sge.length = ISER_RX_LOGIN_SIZE;
+ desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
+
+ desc->cqe.done = iser_login_rsp;
+ wr.wr_cqe = &desc->cqe;
+ wr.sg_list = &desc->sge;
+ wr.num_sge = 1;
+ wr.next = NULL;
+
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv login failed ret=%d\n", ret);
+
+ return ret;
+}
+
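+/**
+ * iser_post_recvm - repost a receive buffer for task responses
+ * @iser_conn: iSER connection context
+ * @rx_desc: iSER RX descriptor to repost
+ *
+ * Return: 0 on success, ib_post_recv() error code on failure
+ */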
+int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
+{
+ struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ struct ib_recv_wr wr;
+ int ret;
+
+ rx_desc->cqe.done = iser_task_rsp;
+ wr.wr_cqe = &rx_desc->cqe;
+ wr.sg_list = &rx_desc->rx_sg;
+ wr.num_sge = 1;
+ wr.next = NULL;
+
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv failed ret=%d\n", ret);
+
+ return ret;
+}
+
+
+/**
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ *
+ * Return: 0 on success, ib_post_send() error code on failure
+ */
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
+{
+ struct ib_send_wr *wr = &tx_desc->send_wr;
+ struct ib_send_wr *first_wr;
+ int ret;
+
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN,
+ DMA_TO_DEVICE);
+
+ wr->next = NULL;
+ wr->wr_cqe = &tx_desc->cqe;
+ wr->sg_list = tx_desc->tx_sg;
+ wr->num_sge = tx_desc->num_sge;
+ wr->opcode = IB_WR_SEND;
+ wr->send_flags = IB_SEND_SIGNALED;
+
+ if (tx_desc->inv_wr.next)
+ first_wr = &tx_desc->inv_wr;
+ else if (tx_desc->reg_wr.wr.next)
+ first_wr = &tx_desc->reg_wr.wr;
+ else
+ first_wr = wr;
+
+ ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_send failed, ret:%d opcode:%d\n",
+ ret, wr->opcode);
+
+ return ret;
+}
+
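+/**
+ * iser_check_task_pi_status - check T10-PI status of a completed task
+ * @iser_task: iSER task context
+ * @cmd_dir: data transfer direction that was protected
+ * @sector: output, the failing sector on a detected error
+ *
+ * Return: 0 if no protection error was detected, otherwise the SCSI
+ * DIF check condition to report (0x1 guard, 0x2 app tag, 0x3 ref tag).
+ */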
+u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir, sector_t *sector)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ struct iser_fr_desc *desc = reg->desc;
+ unsigned long sector_size = iser_task->sc->device->sector_size;
+ struct ib_mr_status mr_status;
+ int ret;
+
+ if (desc && desc->sig_protected) {
+ desc->sig_protected = false;
+ ret = ib_check_mr_status(desc->rsc.sig_mr,
+ IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ iser_err("ib_check_mr_status failed, ret %d\n", ret);
+ /* Not a lot we can do, return ambiguous guard error */
+ *sector = 0;
+ return 0x1;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ sector_t sector_off = mr_status.sig_err.sig_err_offset;
+
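+			/* each sector carries an extra 8-byte PI tuple on the wire */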
+ sector_div(sector_off, sector_size + 8);
+ *sector = scsi_get_sector(iser_task->sc) + sector_off;
+
+ iser_err("PI error found type %d at sector %llx "
+ "expected %x vs actual %x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)*sector,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ return 0x1;
+ case IB_SIG_BAD_REFTAG:
+ return 0x3;
+ case IB_SIG_BAD_APPTAG:
+ return 0x2;
+ }
+ }
+ }
+
+ return 0;
+}
+
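+/**
+ * iser_err_comp - log a completion error and fail the iscsi connection
+ * @wc: work completion that carries the error
+ * @type: string describing the failed operation
+ *
+ * Flush errors are only logged at debug level since they are expected
+ * during connection teardown.
+ */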
+void iser_err_comp(struct ib_wc *wc, const char *type)
+{
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
+
+ iser_err("%s failure: %s (%d) vend_err %#x\n", type,
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->vendor_err);
+
+ if (iser_conn->iscsi_conn)
+ iscsi_conn_failure(iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ } else {
+ iser_dbg("%s failure: %s (%d)\n", type,
+ ib_wc_status_msg(wc->status), wc->status);
+ }
+}
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
new file mode 100644
index 000000000..798147adb
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_ISERT
+ tristate "iSCSI Extensions for RDMA (iSER) target support"
+ depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
+ help
+ Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics.
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile
new file mode 100644
index 000000000..e19b16caf
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
new file mode 100644
index 000000000..a02a3caea
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -0,0 +1,2668 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ * This file contains iSCSI extensions for RDMA (iSER) Verbs
+ *
+ * (c) Copyright 2013 Datera, Inc.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ ****************************************************************************/
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cm.h>
+#include <rdma/rdma_cm.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
+
+#include "ib_isert.h"
+
+static int isert_debug_level;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
+
+static int isert_sg_tablesize_set(const char *val,
+ const struct kernel_param *kp);
+static const struct kernel_param_ops sg_tablesize_ops = {
+ .set = isert_sg_tablesize_set,
+ .get = param_get_int,
+};
+
+static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
+module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
+MODULE_PARM_DESC(sg_tablesize,
+ "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
+
+static DEFINE_MUTEX(device_list_mutex);
+static LIST_HEAD(device_list);
+static struct workqueue_struct *isert_login_wq;
+static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
+
+static int
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
+static int
+isert_login_post_recv(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+static void isert_release_work(struct work_struct *work);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
+ n > ISCSI_ISER_MAX_SG_TABLESIZE)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+ return (conn->pi_support &&
+ cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
+static void
+isert_qp_event_callback(struct ib_event *e, void *context)
+{
+ struct isert_conn *isert_conn = context;
+
+ isert_err("%s (%d): conn %p\n",
+ ib_event_msg(e->event), e->event, isert_conn);
+
+ switch (e->event) {
+ case IB_EVENT_COMM_EST:
+ rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
+ break;
+ default:
+ break;
+ }
+}
+
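+/*
+ * Allocate a CQ from the shared pool and create the RC QP for this
+ * connection, sizing the send/recv queues for the maximum number of
+ * outstanding DTOs and RDMA R/W contexts.
+ */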
+static struct ib_qp *
+isert_create_qp(struct isert_conn *isert_conn,
+ struct rdma_cm_id *cma_id)
+{
+ u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct ib_qp_init_attr attr;
+ int ret, factor;
+
+ isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
+ if (IS_ERR(isert_conn->cq)) {
+ isert_err("Unable to allocate cq\n");
+ ret = PTR_ERR(isert_conn->cq);
+ return ERR_PTR(ret);
+ }
+ isert_conn->cq_size = cq_size;
+
+ memset(&attr, 0, sizeof(struct ib_qp_init_attr));
+ attr.event_handler = isert_qp_event_callback;
+ attr.qp_context = isert_conn;
+ attr.send_cq = isert_conn->cq;
+ attr.recv_cq = isert_conn->cq;
+ attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
+ attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
+ factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
+ isert_sg_tablesize);
+ attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
+ attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
+ attr.cap.max_recv_sge = 1;
+ attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ attr.qp_type = IB_QPT_RC;
+ if (device->pi_capable)
+ attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+
+ ret = rdma_create_qp(cma_id, device->pd, &attr);
+ if (ret) {
+ isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
+
+ return ERR_PTR(ret);
+ }
+
+ return cma_id->qp;
+}
+
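+/*
+ * Allocate and DMA-map the pool of receive descriptors used for task
+ * PDUs; unwinds all prior mappings if any single mapping fails.
+ */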
+static int
+isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct iser_rx_desc *rx_desc;
+ struct ib_sge *rx_sg;
+ u64 dma_addr;
+ int i, j;
+
+ isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
+ sizeof(struct iser_rx_desc),
+ GFP_KERNEL);
+ if (!isert_conn->rx_descs)
+ return -ENOMEM;
+
+ rx_desc = isert_conn->rx_descs;
+
+ for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, dma_addr))
+ goto dma_map_fail;
+
+ rx_desc->dma_addr = dma_addr;
+
+ rx_sg = &rx_desc->rx_sg;
+ rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
+ rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+ rx_sg->lkey = device->pd->local_dma_lkey;
+ rx_desc->rx_cqe.done = isert_recv_done;
+ }
+
+ return 0;
+
+dma_map_fail:
+ rx_desc = isert_conn->rx_descs;
+ for (j = 0; j < i; j++, rx_desc++) {
+ ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ }
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
+ isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+ return -ENOMEM;
+}
+
+static void
+isert_free_rx_descriptors(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
+ struct iser_rx_desc *rx_desc;
+ int i;
+
+ if (!isert_conn->rx_descs)
+ return;
+
+ rx_desc = isert_conn->rx_descs;
+ for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ }
+
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
+}
+
+static int
+isert_create_device_ib_res(struct isert_device *device)
+{
+ struct ib_device *ib_dev = device->ib_device;
+ int ret;
+
+ isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
+ ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
+ isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
+
+ device->pd = ib_alloc_pd(ib_dev, 0);
+ if (IS_ERR(device->pd)) {
+ ret = PTR_ERR(device->pd);
+ isert_err("failed to allocate pd, device %p, ret=%d\n",
+ device, ret);
+ return ret;
+ }
+
+ /* Check signature cap */
+ if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
+ device->pi_capable = true;
+ else
+ device->pi_capable = false;
+
+ return 0;
+}
+
+static void
+isert_free_device_ib_res(struct isert_device *device)
+{
+ isert_info("device %p\n", device);
+
+ ib_dealloc_pd(device->pd);
+}
+
+static void
+isert_device_put(struct isert_device *device)
+{
+ mutex_lock(&device_list_mutex);
+ device->refcount--;
+ isert_info("device %p refcount %d\n", device, device->refcount);
+ if (!device->refcount) {
+ isert_free_device_ib_res(device);
+ list_del(&device->dev_node);
+ kfree(device);
+ }
+ mutex_unlock(&device_list_mutex);
+}
+
+static struct isert_device *
+isert_device_get(struct rdma_cm_id *cma_id)
+{
+ struct isert_device *device;
+ int ret;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(device, &device_list, dev_node) {
+ if (device->ib_device->node_guid == cma_id->device->node_guid) {
+ device->refcount++;
+ isert_info("Found iser device %p refcount %d\n",
+ device, device->refcount);
+ mutex_unlock(&device_list_mutex);
+ return device;
+ }
+ }
+
+ device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
+ if (!device) {
+ mutex_unlock(&device_list_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&device->dev_node);
+
+ device->ib_device = cma_id->device;
+ ret = isert_create_device_ib_res(device);
+ if (ret) {
+ kfree(device);
+ mutex_unlock(&device_list_mutex);
+ return ERR_PTR(ret);
+ }
+
+ device->refcount++;
+ list_add_tail(&device->dev_node, &device_list);
+ isert_info("Created a new iser device %p refcount %d\n",
+ device, device->refcount);
+ mutex_unlock(&device_list_mutex);
+
+ return device;
+}
+
+static void
+isert_init_conn(struct isert_conn *isert_conn)
+{
+ isert_conn->state = ISER_CONN_INIT;
+ INIT_LIST_HEAD(&isert_conn->node);
+ init_completion(&isert_conn->login_comp);
+ init_completion(&isert_conn->login_req_comp);
+ init_waitqueue_head(&isert_conn->rem_wait);
+ kref_init(&isert_conn->kref);
+ mutex_init(&isert_conn->mutex);
+ INIT_WORK(&isert_conn->release_work, isert_release_work);
+}
+
+static void
+isert_free_login_buf(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
+
+ ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+ kfree(isert_conn->login_rsp_buf);
+
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ kfree(isert_conn->login_desc);
+}
+
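+/*
+ * Allocate and DMA-map the login request descriptor (device to CPU) and
+ * the login response buffer (CPU to device) used during the login phase.
+ */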
+static int
+isert_alloc_login_buf(struct isert_conn *isert_conn,
+ struct ib_device *ib_dev)
+{
+ int ret;
+
+ isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
+ GFP_KERNEL);
+ if (!isert_conn->login_desc)
+ return -ENOMEM;
+
+ isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
+ isert_conn->login_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
+ if (ret) {
+ isert_err("login_desc dma mapping error: %d\n", ret);
+ isert_conn->login_desc->dma_addr = 0;
+ goto out_free_login_desc;
+ }
+
+ isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!isert_conn->login_rsp_buf) {
+ ret = -ENOMEM;
+ goto out_unmap_login_desc;
+ }
+
+ isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+ isert_conn->login_rsp_buf,
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+ ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
+ if (ret) {
+ isert_err("login_rsp_dma mapping error: %d\n", ret);
+ isert_conn->login_rsp_dma = 0;
+ goto out_free_login_rsp_buf;
+ }
+
+ return 0;
+
+out_free_login_rsp_buf:
+ kfree(isert_conn->login_rsp_buf);
+out_unmap_login_desc:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+out_free_login_desc:
+ kfree(isert_conn->login_desc);
+ return ret;
+}
+
+static void
+isert_set_nego_params(struct isert_conn *isert_conn,
+ struct rdma_conn_param *param)
+{
+ struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
+
+ /* Set max inflight RDMA READ requests */
+ isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
+ attr->max_qp_init_rd_atom);
+ isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+
+ if (param->private_data) {
+ u8 flags = *(u8 *)param->private_data;
+
+ /*
+		 * use remote invalidation if both the initiator
+ * and the HCA support it
+ */
+ isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
+ (attr->device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ if (isert_conn->snd_w_inv)
+ isert_info("Using remote invalidation\n");
+ }
+}
+
+static void
+isert_destroy_qp(struct isert_conn *isert_conn)
+{
+ ib_destroy_qp(isert_conn->qp);
+ ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
+}
+
+static int
+isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
+ struct isert_conn *isert_conn;
+ struct isert_device *device;
+ int ret = 0;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ isert_dbg("cma_id: %p, portal: %p\n",
+ cma_id, cma_id->context);
+
+ isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
+ if (!isert_conn)
+ return -ENOMEM;
+
+ isert_init_conn(isert_conn);
+ isert_conn->cm_id = cma_id;
+
+ device = isert_device_get(cma_id);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ goto out;
+ }
+ isert_conn->device = device;
+
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+ if (ret)
+ goto out_conn_dev;
+
+ isert_set_nego_params(isert_conn, &event->param.conn);
+
+ isert_conn->qp = isert_create_qp(isert_conn, cma_id);
+ if (IS_ERR(isert_conn->qp)) {
+ ret = PTR_ERR(isert_conn->qp);
+ goto out_rsp_dma_map;
+ }
+
+ ret = isert_login_post_recv(isert_conn);
+ if (ret)
+ goto out_destroy_qp;
+
+ ret = isert_rdma_accept(isert_conn);
+ if (ret)
+ goto out_destroy_qp;
+
+ mutex_lock(&isert_np->mutex);
+ list_add_tail(&isert_conn->node, &isert_np->accepted);
+ mutex_unlock(&isert_np->mutex);
+
+ return 0;
+
+out_destroy_qp:
+ isert_destroy_qp(isert_conn);
+out_rsp_dma_map:
+ isert_free_login_buf(isert_conn);
+out_conn_dev:
+ isert_device_put(device);
+out:
+ kfree(isert_conn);
+ rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
+ return ret;
+}
+
+static void
+isert_connect_release(struct isert_conn *isert_conn)
+{
+ struct isert_device *device = isert_conn->device;
+
+ isert_dbg("conn %p\n", isert_conn);
+
+ BUG_ON(!device);
+
+ isert_free_rx_descriptors(isert_conn);
+ if (isert_conn->cm_id &&
+ !isert_conn->dev_removed)
+ rdma_destroy_id(isert_conn->cm_id);
+
+ if (isert_conn->qp)
+ isert_destroy_qp(isert_conn);
+
+ if (isert_conn->login_desc)
+ isert_free_login_buf(isert_conn);
+
+ isert_device_put(device);
+
+ if (isert_conn->dev_removed)
+ wake_up_interruptible(&isert_conn->rem_wait);
+ else
+ kfree(isert_conn);
+}
+
+static void
+isert_connected_handler(struct rdma_cm_id *cma_id)
+{
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ struct isert_np *isert_np = cma_id->context;
+
+ isert_info("conn %p\n", isert_conn);
+
+ mutex_lock(&isert_conn->mutex);
+ isert_conn->state = ISER_CONN_UP;
+ kref_get(&isert_conn->kref);
+ mutex_unlock(&isert_conn->mutex);
+
+ mutex_lock(&isert_np->mutex);
+ list_move_tail(&isert_conn->node, &isert_np->pending);
+ mutex_unlock(&isert_np->mutex);
+
+ isert_info("np %p: Allow accept_np to continue\n", isert_np);
+ up(&isert_np->sem);
+}
+
+static void
+isert_release_kref(struct kref *kref)
+{
+ struct isert_conn *isert_conn = container_of(kref,
+ struct isert_conn, kref);
+
+ isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+ current->pid);
+
+ isert_connect_release(isert_conn);
+}
+
+static void
+isert_put_conn(struct isert_conn *isert_conn)
+{
+ kref_put(&isert_conn->kref, isert_release_kref);
+}
+
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+		 * This means the iscsi layer doesn't know about this
+		 * connection, so schedule a cleanup ourselves.
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is BOUND, move state
+ * to TERMINATING and start the teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with the connection mutex held,
+ * thus it is safe to call multiple times.
+ */
+static void
+isert_conn_terminate(struct isert_conn *isert_conn)
+{
+ int err;
+
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+}
+
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+ enum rdma_cm_event_type event)
+{
+ isert_dbg("%s (%d): isert np %p\n",
+ rdma_event_msg(event), event, isert_np);
+
+ switch (event) {
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_np->cm_id = NULL;
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ isert_np->cm_id = isert_setup_id(isert_np);
+ if (IS_ERR(isert_np->cm_id)) {
+ isert_err("isert np %p setup id failed: %ld\n",
+ isert_np, PTR_ERR(isert_np->cm_id));
+ isert_np->cm_id = NULL;
+ }
+ break;
+ default:
+ isert_err("isert np %p Unexpected event %d\n",
+ isert_np, event);
+ }
+
+ return -1;
+}
+
+static int
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ enum rdma_cm_event_type event)
+{
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+ mutex_lock(&isert_conn->mutex);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ ib_drain_qp(isert_conn->qp);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p terminating in state %d\n",
+ isert_conn, isert_conn->state);
+ }
+ mutex_unlock(&isert_conn->mutex);
+
+ return 0;
+}
+
+static int
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ struct isert_np *isert_np = cma_id->context;
+
+ ib_drain_qp(isert_conn->qp);
+
+ mutex_lock(&isert_np->mutex);
+ list_del_init(&isert_conn->node);
+ mutex_unlock(&isert_np->mutex);
+ isert_conn->cm_id = NULL;
+ isert_put_conn(isert_conn);
+
+ return -1;
+}
+
+static int
+isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+ struct isert_np *isert_np = cma_id->context;
+ struct isert_conn *isert_conn;
+ int ret = 0;
+
+ isert_info("%s (%d): status %d id %p np %p\n",
+ rdma_event_msg(event->event), event->event,
+ event->status, cma_id, cma_id->context);
+
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ ret = isert_connect_request(cma_id, event);
+ if (ret)
+ isert_err("failed handle connect request %d\n", ret);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ isert_connected_handler(cma_id);
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+ ret = isert_disconnected_handler(cma_id, event->event);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_conn = cma_id->qp->qp_context;
+ isert_conn->dev_removed = true;
+ isert_disconnected_handler(cma_id, event->event);
+ wait_event_interruptible(isert_conn->rem_wait,
+ isert_conn->state == ISER_CONN_DOWN);
+ kfree(isert_conn);
+ /*
+ * return non-zero from the callback to destroy
+ * the rdma cm id
+ */
+ return 1;
+ case RDMA_CM_EVENT_REJECTED:
+ isert_info("Connection rejected: %s\n",
+ rdma_reject_msg(cma_id, event->status));
+ fallthrough;
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ ret = isert_connect_error(cma_id);
+ break;
+ default:
+ isert_err("Unhandled RDMA CMA event: %d\n", event->event);
+ break;
+ }
+
+ return ret;
+}
+
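+/*
+ * Post @count receive work requests as a single chained list; the last
+ * entry terminates the chain.
+ */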
+static int
+isert_post_recvm(struct isert_conn *isert_conn, u32 count)
+{
+ struct ib_recv_wr *rx_wr;
+ int i, ret;
+ struct iser_rx_desc *rx_desc;
+
+ for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &isert_conn->rx_descs[i];
+
+ rx_wr->wr_cqe = &rx_desc->rx_cqe;
+ rx_wr->sg_list = &rx_desc->rx_sg;
+ rx_wr->num_sge = 1;
+ rx_wr->next = rx_wr + 1;
+ rx_desc->in_use = false;
+ }
+ rx_wr--;
+ rx_wr->next = NULL; /* mark end of work requests list */
+
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
+ if (ret)
+ isert_err("ib_post_recv() failed with ret: %d\n", ret);
+
+ return ret;
+}
+
+static int
+isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
+{
+ struct ib_recv_wr rx_wr;
+ int ret;
+
+ if (!rx_desc->in_use) {
+ /*
+		 * if the descriptor is not in use, we already reposted it
+ * for recv, so just silently return
+ */
+ return 0;
+ }
+
+ rx_desc->in_use = false;
+ rx_wr.wr_cqe = &rx_desc->rx_cqe;
+ rx_wr.sg_list = &rx_desc->rx_sg;
+ rx_wr.num_sge = 1;
+ rx_wr.next = NULL;
+
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
+ if (ret)
+ isert_err("ib_post_recv() failed with ret: %d\n", ret);
+
+ return ret;
+}
+
+static int
+isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+{
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct ib_send_wr send_wr;
+ int ret;
+
+ ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+ tx_desc->tx_cqe.done = isert_login_send_done;
+
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &tx_desc->tx_cqe;
+ send_wr.sg_list = tx_desc->tx_sg;
+ send_wr.num_sge = tx_desc->num_sge;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
+ if (ret)
+ isert_err("ib_post_send() failed, ret: %d\n", ret);
+
+ return ret;
+}
+
+static void
+__isert_create_send_desc(struct isert_device *device,
+ struct iser_tx_desc *tx_desc)
+{
+
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
+ tx_desc->iser_header.flags = ISCSI_CTRL;
+
+ tx_desc->num_sge = 1;
+
+ if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
+ isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+ }
+}
+
+static void
+isert_create_send_desc(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd,
+ struct iser_tx_desc *tx_desc)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+
+ ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+ __isert_create_send_desc(device, tx_desc);
+}
+
+static int
+isert_init_tx_hdrs(struct isert_conn *isert_conn,
+ struct iser_tx_desc *tx_desc)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ u64 dma_addr;
+
+ dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, dma_addr)) {
+ isert_err("ib_dma_mapping_error() failed\n");
+ return -ENOMEM;
+ }
+
+ tx_desc->dma_addr = dma_addr;
+ tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
+ tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+ tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
+
+ isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
+ tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
+ tx_desc->tx_sg[0].lkey);
+
+ return 0;
+}
+
+static void
+isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+ struct ib_send_wr *send_wr)
+{
+ struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
+
+ tx_desc->tx_cqe.done = isert_send_done;
+ send_wr->wr_cqe = &tx_desc->tx_cqe;
+
+ if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
+ send_wr->opcode = IB_WR_SEND_WITH_INV;
+ send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
+ } else {
+ send_wr->opcode = IB_WR_SEND;
+ }
+
+ send_wr->sg_list = &tx_desc->tx_sg[0];
+ send_wr->num_sge = isert_cmd->tx_desc.num_sge;
+ send_wr->send_flags = IB_SEND_SIGNALED;
+}
+
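+/*
+ * Post the dedicated login descriptor for receive so the next login
+ * request PDU from the initiator can be handled by isert_login_recv_done().
+ */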
+static int
+isert_login_post_recv(struct isert_conn *isert_conn)
+{
+ struct ib_recv_wr rx_wr;
+ struct ib_sge sge;
+ int ret;
+
+ memset(&sge, 0, sizeof(struct ib_sge));
+ sge.addr = isert_conn->login_desc->dma_addr +
+ isert_get_hdr_offset(isert_conn->login_desc);
+ sge.length = ISER_RX_PAYLOAD_SIZE;
+ sge.lkey = isert_conn->device->pd->local_dma_lkey;
+
+ isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
+ sge.addr, sge.length, sge.lkey);
+
+ isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
+
+ memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
+ rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
+ rx_wr.sg_list = &sge;
+ rx_wr.num_sge = 1;
+
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
+ if (ret)
+ isert_err("ib_post_recv() failed: %d\n", ret);
+
+ return ret;
+}
+
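+/*
+ * Build and post a login response PDU. Once the login completes, the
+ * full receive descriptor pool is allocated and posted and the
+ * connection moves to ISER_CONN_FULL_FEATURE before the final
+ * response is sent.
+ */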
+static int
+isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
+ u32 length)
+{
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
+ int ret;
+
+ __isert_create_send_desc(device, tx_desc);
+
+ memcpy(&tx_desc->iscsi_header, &login->rsp[0],
+ sizeof(struct iscsi_hdr));
+
+ isert_init_tx_hdrs(isert_conn, tx_desc);
+
+ if (length > 0) {
+ struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
+
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
+ length, DMA_TO_DEVICE);
+
+ memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
+
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
+ length, DMA_TO_DEVICE);
+
+ tx_dsg->addr = isert_conn->login_rsp_dma;
+ tx_dsg->length = length;
+ tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
+ tx_desc->num_sge = 2;
+ }
+ if (!login->login_failed) {
+ if (login->login_complete) {
+ ret = isert_alloc_rx_descriptors(isert_conn);
+ if (ret)
+ return ret;
+
+ ret = isert_post_recvm(isert_conn,
+ ISERT_QP_MAX_RECV_DTOS);
+ if (ret)
+ return ret;
+
+ /* Now we are in FULL_FEATURE phase */
+ mutex_lock(&isert_conn->mutex);
+ isert_conn->state = ISER_CONN_FULL_FEATURE;
+ mutex_unlock(&isert_conn->mutex);
+ goto post_send;
+ }
+
+ ret = isert_login_post_recv(isert_conn);
+ if (ret)
+ return ret;
+ }
+post_send:
+ ret = isert_login_post_send(isert_conn, tx_desc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+isert_rx_login_req(struct isert_conn *isert_conn)
+{
+ struct iser_rx_desc *rx_desc = isert_conn->login_desc;
+ int rx_buflen = isert_conn->login_req_len;
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsi_login *login = conn->conn_login;
+ int size;
+
+ isert_info("conn %p\n", isert_conn);
+
+ WARN_ON_ONCE(!login);
+
+ if (login->first_request) {
+ struct iscsi_login_req *login_req =
+ (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
+ /*
+ * Setup the initial iscsi_login values from the leading
+ * login request PDU.
+ */
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage =
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
+ >> 2;
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = be32_to_cpu(login_req->cmdsn);
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+ login->cid = be16_to_cpu(login_req->cid);
+ login->tsih = be16_to_cpu(login_req->tsih);
+ }
+
+ memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
+
+ size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
+ isert_dbg("Using login payload size: %d, rx_buflen: %d "
+ "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
+ MAX_KEY_VALUE_PAIRS);
+ memcpy(login->req_buf, isert_get_data(rx_desc), size);
+
+ if (login->first_request) {
+ complete(&isert_conn->login_comp);
+ return;
+ }
+ queue_delayed_work(isert_login_wq, &conn->login_work, 0);
+}
+
+static struct iscsit_cmd
+*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
+{
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_cmd *isert_cmd;
+ struct iscsit_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd) {
+ isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
+ return NULL;
+ }
+ isert_cmd = iscsit_priv_cmd(cmd);
+ isert_cmd->conn = isert_conn;
+ isert_cmd->iscsit_cmd = cmd;
+ isert_cmd->rx_desc = rx_desc;
+
+ return cmd;
+}
+
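+/*
+ * Set up a SCSI command from the received PDU and copy any immediate
+ * data into the command's scatterlist, or map the receive buffer
+ * directly when the immediate data carries the whole transfer.
+ */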
+static int
+isert_handle_scsi_cmd(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
+ struct iser_rx_desc *rx_desc, unsigned char *buf)
+{
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
+ int imm_data, imm_data_len, unsol_data, sg_nents, rc;
+ bool dump_payload = false;
+ unsigned int data_len;
+
+ rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
+ if (rc < 0)
+ return rc;
+
+ imm_data = cmd->immediate_data;
+ imm_data_len = cmd->first_burst_len;
+ unsol_data = cmd->unsolicited_data;
+ data_len = cmd->se_cmd.data_length;
+
+ if (imm_data && imm_data_len == data_len)
+ cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+ if (rc < 0) {
+ return 0;
+ } else if (rc > 0) {
+ dump_payload = true;
+ goto sequence_cmd;
+ }
+
+ if (!imm_data)
+ return 0;
+
+ if (imm_data_len != data_len) {
+ sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
+ sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
+ isert_get_data(rx_desc), imm_data_len);
+ isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
+ sg_nents, imm_data_len);
+ } else {
+ sg_init_table(&isert_cmd->sg, 1);
+ cmd->se_cmd.t_data_sg = &isert_cmd->sg;
+ cmd->se_cmd.t_data_nents = 1;
+ sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
+ imm_data_len);
+ isert_dbg("Transfer Immediate imm_data_len: %d\n",
+ imm_data_len);
+ }
+
+ cmd->write_data_done += imm_data_len;
+
+ if (cmd->write_data_done == cmd->se_cmd.data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+sequence_cmd:
+ rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+
+ if (!rc && !dump_payload && unsol_data)
+ iscsit_set_unsolicited_dataout(cmd);
+ else if (dump_payload && imm_data)
+ target_put_sess_cmd(&cmd->se_cmd);
+
+ return 0;
+}
+
+static int
+isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
+ struct iser_rx_desc *rx_desc, unsigned char *buf)
+{
+ struct scatterlist *sg_start;
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsit_cmd *cmd = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *)buf;
+ u32 unsol_data_len = ntoh24(hdr->dlength);
+ int rc, sg_nents, sg_off, page_off;
+
+ rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
+ if (rc < 0)
+ return rc;
+ else if (!cmd)
+ return 0;
+ /*
+ * FIXME: Unexpected unsolicited_data out
+ */
+ if (!cmd->unsolicited_data) {
+ isert_err("Received unexpected solicited data payload\n");
+ dump_stack();
+ return -1;
+ }
+
+ isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ unsol_data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
+
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+ sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
+ page_off = cmd->write_data_done % PAGE_SIZE;
+ /*
+ * FIXME: Non page-aligned unsolicited_data out
+ */
+ if (page_off) {
+ isert_err("unexpected non-page aligned data payload\n");
+ dump_stack();
+ return -1;
+ }
+ isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
+ "sg_nents: %u from %p %u\n", sg_start, sg_off,
+ sg_nents, isert_get_data(rx_desc), unsol_data_len);
+
+ sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
+ unsol_data_len);
+
+ rc = iscsit_check_dataout_payload(cmd, hdr, false);
+ if (rc < 0)
+ return rc;
+
+ /*
+ * multiple data-outs on the same command can arrive -
+	 * so post the buffer beforehand
+ */
+ return isert_post_recv(isert_conn, rx_desc);
+}
+
+static int
+isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+ struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
+ unsigned char *buf)
+{
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
+ int rc;
+
+ rc = iscsit_setup_nop_out(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+ /*
+ * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
+ */
+
+ return iscsit_process_nop_out(conn, cmd, hdr);
+}
+
+static int
+isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+ struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
+ struct iscsi_text *hdr)
+{
+ struct iscsit_conn *conn = isert_conn->conn;
+ u32 payload_length = ntoh24(hdr->dlength);
+ int rc;
+ unsigned char *text_in = NULL;
+
+ rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+
+ if (payload_length) {
+ text_in = kzalloc(payload_length, GFP_KERNEL);
+ if (!text_in)
+ return -ENOMEM;
+ }
+ cmd->text_in_ptr = text_in;
+
+ memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
+
+ return iscsit_process_text_cmd(conn, cmd, hdr);
+}
+
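+/*
+ * Demultiplex a received iSCSI PDU by opcode, allocating an iscsit_cmd
+ * where needed and stashing the iSER read/write STags and VAs for later
+ * RDMA, with the read STag (or write STag) doubling as the remote
+ * invalidation rkey.
+ */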
+static int
+isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
+ uint32_t read_stag, uint64_t read_va,
+ uint32_t write_stag, uint64_t write_va)
+{
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsit_cmd *cmd;
+ struct isert_cmd *isert_cmd;
+ int ret = -EINVAL;
+ u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+
+ if (conn->sess->sess_ops->SessionType &&
+ (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
+ isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+ " ignoring\n", opcode);
+ return 0;
+ }
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ cmd = isert_allocate_cmd(conn, rx_desc);
+ if (!cmd)
+ break;
+
+ isert_cmd = iscsit_priv_cmd(cmd);
+ isert_cmd->read_stag = read_stag;
+ isert_cmd->read_va = read_va;
+ isert_cmd->write_stag = write_stag;
+ isert_cmd->write_va = write_va;
+ isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
+
+ ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
+ rx_desc, (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ cmd = isert_allocate_cmd(conn, rx_desc);
+ if (!cmd)
+ break;
+
+ isert_cmd = iscsit_priv_cmd(cmd);
+ ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
+ rx_desc, (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
+ (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ cmd = isert_allocate_cmd(conn, rx_desc);
+ if (!cmd)
+ break;
+
+ ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+ (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_LOGOUT:
+ cmd = isert_allocate_cmd(conn, rx_desc);
+ if (!cmd)
+ break;
+
+ ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_TEXT:
+ if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
+ cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+ else
+ cmd = isert_allocate_cmd(conn, rx_desc);
+
+ if (!cmd)
+ break;
+
+ isert_cmd = iscsit_priv_cmd(cmd);
+ ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
+ rx_desc, (struct iscsi_text *)hdr);
+ break;
+ default:
+ isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ dump_stack();
+ break;
+ }
+
+ return ret;
+}
+
+static void
+isert_print_wc(struct ib_wc *wc, const char *type)
+{
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("%s failure: %s (%d) vend_err %x\n", type,
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->vendor_err);
+ else
+ isert_dbg("%s failure: %s (%d)\n", type,
+ ib_wc_status_msg(wc->status), wc->status);
+}
+
+static void
+isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+ struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
+ uint64_t read_va = 0, write_va = 0;
+ uint32_t read_stag = 0, write_stag = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "recv");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ return;
+ }
+
+ rx_desc->in_use = true;
+
+ ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+
+ isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
+ (int)(wc->byte_len - ISER_HEADERS_LEN));
+
+ switch (iser_ctrl->flags & 0xF0) {
+ case ISCSI_CTRL:
+ if (iser_ctrl->flags & ISER_RSV) {
+ read_stag = be32_to_cpu(iser_ctrl->read_stag);
+ read_va = be64_to_cpu(iser_ctrl->read_va);
+ isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
+ read_stag, (unsigned long long)read_va);
+ }
+ if (iser_ctrl->flags & ISER_WSV) {
+ write_stag = be32_to_cpu(iser_ctrl->write_stag);
+ write_va = be64_to_cpu(iser_ctrl->write_va);
+ isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
+ write_stag, (unsigned long long)write_va);
+ }
+
+ isert_dbg("ISER ISCSI_CTRL PDU\n");
+ break;
+ case ISER_HELLO:
+ isert_err("iSER Hello message\n");
+ break;
+ default:
+ isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
+ break;
+ }
+
+ isert_rx_opcode(isert_conn, rx_desc,
+ read_stag, read_va, write_stag, write_va);
+
+ ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+}
+
+static void
+isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login recv");
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+
+ isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
+
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
+
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
+ }
+
+ mutex_lock(&isert_conn->mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->mutex);
+
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+}
+
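+/*
+ * Tear down the rdma_rw context for a command, using the signature
+ * variant when the command was protected by T10-PI.
+ */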
+static void
+isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
+{
+ struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
+
+ if (!cmd->rw.nr_ops)
+ return;
+
+ if (isert_prot_cmd(conn, se_cmd)) {
+ rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
+ conn->cm_id->port_num, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents, dir);
+ } else {
+ rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
+ }
+
+ cmd->rw.nr_ops = 0;
+}
+
+static void
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
+{
+ struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
+ struct isert_conn *isert_conn = isert_cmd->conn;
+ struct iscsit_conn *conn = isert_conn->conn;
+ struct iscsi_text_rsp *hdr;
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ iscsit_stop_dataout_timer(cmd);
+ /*
+ * Check for special case during comp_err where
+ * WRITE_PENDING has been handed off from core,
+ * but requires an extra target_put_sess_cmd()
+ * before transport_generic_free_cmd() below.
+ */
+ if (comp_err &&
+ cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ target_put_sess_cmd(se_cmd);
+ }
+ }
+
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ break;
+ case ISCSI_OP_REJECT:
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_TEXT:
+ hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+ /* If the continue bit is on, keep the command alive */
+ if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+ break;
+
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ /*
+ * Handle special case for REJECT when iscsi_add_reject*() has
+ * overwritten the original iscsi_opcode assignment, and the
+ * associated cmd->se_cmd needs to be released.
+ */
+ if (cmd->se_cmd.se_tfo != NULL) {
+ isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
+ cmd->iscsi_opcode);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+ break;
+ }
+ fallthrough;
+ default:
+ iscsit_release_cmd(cmd);
+ break;
+ }
+}
+
+static void
+isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
+{
+ if (tx_desc->dma_addr != 0) {
+ isert_dbg("unmap single for tx_desc->dma_addr\n");
+ ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->dma_addr = 0;
+ }
+}
+
+static void
+isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
+ struct ib_device *ib_dev, bool comp_err)
+{
+ if (isert_cmd->pdu_buf_dma != 0) {
+ isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
+ ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
+ isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
+ isert_cmd->pdu_buf_dma = 0;
+ }
+
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+ isert_put_cmd(isert_cmd, comp_err);
+}
+
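+/*
+ * Query the signature MR for T10-PI errors and, on failure, translate
+ * the error type into the matching TCM sense key and record the failing
+ * LBA in se_cmd->sense_info.
+ */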
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+ struct ib_mr_status mr_status;
+ int ret;
+
+ ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ isert_err("ib_check_mr_status failed, ret %d\n", ret);
+ goto fail_mr_status;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ u64 sec_offset_err;
+ u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ break;
+ }
+ sec_offset_err = mr_status.sig_err.sig_err_offset;
+ do_div(sec_offset_err, block_size);
+ se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;
+
+ isert_err("PI error found type %d at sector 0x%llx "
+ "expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ (unsigned long long)se_cmd->sense_info,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ ret = 1;
+ }
+
+fail_mr_status:
+ return ret;
+}
+
+static void
+isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+ struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
+ int ret = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma write");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+
+ if (ret) {
+ /*
+ * transport_generic_request_failure() expects to have
+ * plus two references to handle queue-full, so re-add
+ * one here as target-core will have already dropped
+ * it after the first isert_put_datain() callback.
+ */
+ kref_get(&cmd->cmd_kref);
+ transport_generic_request_failure(cmd, cmd->pi_err);
+ } else {
+ /*
+ * XXX: isert_put_response() failure is not retried.
+ */
+ ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
+ if (ret)
+ pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+ }
+}
+
+static void
+isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+ struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ int ret = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma read");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ iscsit_stop_dataout_timer(cmd);
+
+ if (isert_prot_cmd(isert_conn, se_cmd))
+ ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+ cmd->write_data_done = 0;
+
+ isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ /*
+ * transport_generic_request_failure() will drop the extra
+ * se_cmd->cmd_kref reference after T10-PI error, and handle
+ * any non-zero ->queue_status() callback error retries.
+ */
+ if (ret)
+ transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+ else
+ target_execute_cmd(se_cmd);
+}
+
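+/*
+ * Finish control-PDU send completions from workqueue context, as the
+ * logout/TMR post handlers invoked here may sleep.
+ */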
+static void
+isert_do_control_comp(struct work_struct *work)
+{
+ struct isert_cmd *isert_cmd = container_of(work,
+ struct isert_cmd, comp_work);
+ struct isert_conn *isert_conn = isert_cmd->conn;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
+
+ isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
+
+ switch (cmd->i_state) {
+ case ISTATE_SEND_TASKMGTRSP:
+ iscsit_tmr_post_handler(cmd, cmd->conn);
+ fallthrough;
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
+ cmd->i_state = ISTATE_SENT_STATUS;
+ isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
+ ib_dev, false);
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ iscsit_logout_post_handler(cmd, cmd->conn);
+ break;
+ default:
+ isert_err("Unknown i_state %d\n", cmd->i_state);
+ dump_stack();
+ break;
+ }
+}
+
+static void
+isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ }
+
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+}
+
+static void
+isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ switch (isert_cmd->iscsit_cmd->i_state) {
+ case ISTATE_SEND_TASKMGTRSP:
+ case ISTATE_SEND_LOGOUTRSP:
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+
+ INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
+ queue_work(isert_comp_wq, &isert_cmd->comp_work);
+ return;
+ default:
+ isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
+ break;
+ }
+}
+
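+/*
+ * Re-post the RX descriptor that carried the original request before
+ * posting the response send WR, so a receive buffer is available for
+ * the next incoming PDU.
+ */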
+static int
+isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
+{
+ int ret;
+
+ ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+ if (ret)
+ return ret;
+
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
+ if (ret) {
+ isert_err("ib_post_send failed with %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+ struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
+ &isert_cmd->tx_desc.iscsi_header;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ iscsit_build_rsp_pdu(cmd, conn, true, hdr);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ /*
+ * Attach SENSE DATA payload to iSCSI Response PDU
+ */
+ if (cmd->se_cmd.sense_buffer &&
+ ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+ (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+ u32 padding, pdu_len;
+
+ put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
+ cmd->sense_buffer);
+ cmd->se_cmd.scsi_sense_length += sizeof(__be16);
+
+ padding = -(cmd->se_cmd.scsi_sense_length) & 3;
+ hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
+ pdu_len = cmd->se_cmd.scsi_sense_length + padding;
+
+ isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
+ (void *)cmd->sense_buffer, pdu_len,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
+
+ isert_cmd->pdu_buf_len = pdu_len;
+ tx_dsg->addr = isert_cmd->pdu_buf_dma;
+ tx_dsg->length = pdu_len;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
+ isert_cmd->tx_desc.num_sge = 2;
+ }
+
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
+
+ isert_dbg("Posting SCSI Response\n");
+
+ return isert_post_response(isert_conn, isert_cmd);
+}
+
+static void
+isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsit_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
+
+ if (conn->tpg->tpg_attrib.t10_pi) {
+ if (device->pi_capable) {
+ isert_info("conn %p PI offload enabled\n", isert_conn);
+ isert_conn->pi_support = true;
+ return TARGET_PROT_ALL;
+ }
+ }
+
+ isert_info("conn %p PI offload disabled\n", isert_conn);
+ isert_conn->pi_support = false;
+
+ return TARGET_PROT_NORMAL;
+}
+
+static int
+isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
+ bool nopout_response)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
+ &isert_cmd->tx_desc.iscsi_header,
+ nopout_response);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
+
+ isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
+
+ return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
+
+ isert_dbg("conn %p Posting Logout Response\n", isert_conn);
+
+ return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
+
+ isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
+
+ return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+ struct iscsi_reject *hdr =
+ (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ iscsit_build_reject(cmd, conn, hdr);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
+ isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
+ (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
+ isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
+ tx_dsg->addr = isert_cmd->pdu_buf_dma;
+ tx_dsg->length = ISCSI_HDR_LEN;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
+ isert_cmd->tx_desc.num_sge = 2;
+
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
+
+ isert_dbg("conn %p Posting Reject\n", isert_conn);
+
+ return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+ struct iscsi_text_rsp *hdr =
+ (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+ u32 txt_rsp_len;
+ int rc;
+
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
+ if (rc < 0)
+ return rc;
+
+ txt_rsp_len = rc;
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+
+ if (txt_rsp_len) {
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+ void *txt_rsp_buf = cmd->buf_ptr;
+
+ isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
+ txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+ return -ENOMEM;
+
+ isert_cmd->pdu_buf_len = txt_rsp_len;
+ tx_dsg->addr = isert_cmd->pdu_buf_dma;
+ tx_dsg->length = txt_rsp_len;
+ tx_dsg->lkey = device->pd->local_dma_lkey;
+ isert_cmd->tx_desc.num_sge = 2;
+ }
+ isert_init_send_wr(isert_conn, isert_cmd, send_wr);
+
+ isert_dbg("conn %p Text Response\n", isert_conn);
+
+ return isert_post_response(isert_conn, isert_cmd);
+}
+
+static inline void
+isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
+ domain->sig.dif.ref_tag = se_cmd->reftag_seed;
+ /*
+ * At the moment we hard code those, but if in the future
+ * the target core would like to use it, we will take it
+ * from se_cmd.
+ */
+ domain->sig.dif.apptag_check_mask = 0xffff;
+ domain->sig.dif.app_escape = true;
+ domain->sig.dif.ref_escape = true;
+ if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
+ se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
+ domain->sig.dif.ref_remap = true;
+}
+
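+/*
+ * Translate the target-core protection operation into ib_sig_attrs:
+ * INSERT/STRIP operations protect only one side (wire or memory),
+ * while PASS operations carry the protection information through
+ * both domains.
+ */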
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_STRIP:
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+ isert_set_dif_domain(se_cmd, &sig_attrs->wire);
+ break;
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+ isert_set_dif_domain(se_cmd, &sig_attrs->mem);
+ break;
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ isert_set_dif_domain(se_cmd, &sig_attrs->wire);
+ isert_set_dif_domain(se_cmd, &sig_attrs->mem);
+ break;
+ default:
+ isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+ return -EINVAL;
+ }
+
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+
+ return 0;
+}
+
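+/*
+ * Initialize the rdma_rw context on first use (with signature MRs when
+ * T10-PI is enabled) and post the RDMA READ/WRITE work requests. The
+ * ctx_init_done flag keeps dataout retransmits from re-initializing an
+ * already prepared context.
+ */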
+static int
+isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
+ u8 port_num = conn->cm_id->port_num;
+ u64 addr;
+ u32 rkey, offset;
+ int ret;
+
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
+
+ if (dir == DMA_FROM_DEVICE) {
+ addr = cmd->write_va;
+ rkey = cmd->write_stag;
+ offset = cmd->iscsit_cmd->write_data_done;
+ } else {
+ addr = cmd->read_va;
+ rkey = cmd->read_stag;
+ offset = 0;
+ }
+
+ if (isert_prot_cmd(conn, se_cmd)) {
+ struct ib_sig_attrs sig_attrs;
+
+ ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+ if (ret)
+ return ret;
+
+ WARN_ON_ONCE(offset);
+ ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ se_cmd->t_prot_sg, se_cmd->t_prot_nents,
+ &sig_attrs, addr, rkey, dir);
+ } else {
+ ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ offset, addr, rkey, dir);
+ }
+
+ if (ret < 0) {
+ isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
+ return ret;
+ }
+
+ cmd->ctx_init_done = true;
+
+rdma_ctx_post:
+ ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
+ if (ret < 0)
+ isert_err("Cmd: %p failed to post RDMA res\n", cmd);
+ return ret;
+}
+
+static int
+isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ struct isert_conn *isert_conn = conn->context;
+ struct ib_cqe *cqe = NULL;
+ struct ib_send_wr *chain_wr = NULL;
+ int rc;
+
+ isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
+ isert_cmd, se_cmd->data_length);
+
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+ cqe = &isert_cmd->tx_desc.tx_cqe;
+ } else {
+ /*
+ * Build isert_conn->tx_desc for iSCSI response PDU and attach
+ */
+ isert_create_send_desc(isert_conn, isert_cmd,
+ &isert_cmd->tx_desc);
+ iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_conn, isert_cmd,
+ &isert_cmd->tx_desc.send_wr);
+
+ rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+ if (rc)
+ return rc;
+
+ chain_wr = &isert_cmd->tx_desc.send_wr;
+ }
+
+ rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+ isert_cmd, rc);
+ return rc;
+}
+
+static int
+isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ int ret;
+
+ isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+ isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
+
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+ ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+ &isert_cmd->tx_desc.tx_cqe, NULL);
+
+ isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+ isert_cmd, ret);
+ return ret;
+}
+
+static int
+isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
+{
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ int ret = 0;
+
+ switch (state) {
+ case ISTATE_REMOVE:
+ spin_lock_bh(&conn->cmd_lock);
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+ isert_put_cmd(isert_cmd, true);
+ break;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ ret = isert_put_nopin(cmd, conn, false);
+ break;
+ default:
+ isert_err("Unknown immediate state: 0x%02x\n", state);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
+{
+ struct isert_conn *isert_conn = conn->context;
+ int ret;
+
+ switch (state) {
+ case ISTATE_SEND_LOGOUTRSP:
+ ret = isert_put_logout_rsp(cmd, conn);
+ if (!ret)
+ isert_conn->logout_posted = true;
+ break;
+ case ISTATE_SEND_NOPIN:
+ ret = isert_put_nopin(cmd, conn, true);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ ret = isert_put_tm_rsp(cmd, conn);
+ break;
+ case ISTATE_SEND_REJECT:
+ ret = isert_put_reject(cmd, conn);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+ ret = isert_put_text_rsp(cmd, conn);
+ break;
+ case ISTATE_SEND_STATUS:
+ /*
+		 * Special case for sending non-GOOD SCSI status from TX thread
+		 * context during pre se_cmd execution failure.
+ */
+ ret = isert_put_response(conn, cmd);
+ break;
+ default:
+ isert_err("Unknown response state: 0x%02x\n", state);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+ struct iscsi_np *np = isert_np->np;
+ struct rdma_cm_id *id;
+ struct sockaddr *sa;
+ int ret;
+
+ sa = (struct sockaddr *)&np->np_sockaddr;
+ isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+ id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(id)) {
+ isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+ ret = PTR_ERR(id);
+ goto out;
+ }
+ isert_dbg("id %p context %p\n", id, id->context);
+
+ /*
+ * Allow both IPv4 and IPv6 sockets to bind a single port
+ * at the same time.
+ */
+ ret = rdma_set_afonly(id, 1);
+ if (ret) {
+ isert_err("rdma_set_afonly() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ ret = rdma_bind_addr(id, sa);
+ if (ret) {
+ isert_err("rdma_bind_addr() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ ret = rdma_listen(id, 0);
+ if (ret) {
+ isert_err("rdma_listen() failed: %d\n", ret);
+ goto out_id;
+ }
+
+ return id;
+out_id:
+ rdma_destroy_id(id);
+out:
+ return ERR_PTR(ret);
+}
+
+static int
+isert_setup_np(struct iscsi_np *np,
+ struct sockaddr_storage *ksockaddr)
+{
+ struct isert_np *isert_np;
+ struct rdma_cm_id *isert_lid;
+ int ret;
+
+ isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+ if (!isert_np)
+ return -ENOMEM;
+
+ sema_init(&isert_np->sem, 0);
+ mutex_init(&isert_np->mutex);
+ INIT_LIST_HEAD(&isert_np->accepted);
+ INIT_LIST_HEAD(&isert_np->pending);
+ isert_np->np = np;
+
+ /*
+	 * Set up np->np_sockaddr from the sockaddr passed in from
+	 * the iscsi_target_configfs.c code.
+ */
+ memcpy(&np->np_sockaddr, ksockaddr,
+ sizeof(struct sockaddr_storage));
+
+ isert_lid = isert_setup_id(isert_np);
+ if (IS_ERR(isert_lid)) {
+ ret = PTR_ERR(isert_lid);
+ goto out;
+ }
+
+ isert_np->cm_id = isert_lid;
+ np->np_context = isert_np;
+
+ return 0;
+
+out:
+ kfree(isert_np);
+
+ return ret;
+}
+
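+/*
+ * Accept the RDMA CM connection, advertising in the private data that
+ * zero-based VA is not used and, unless the initiator requested it,
+ * that send-with-invalidate is not used either.
+ */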
+static int
+isert_rdma_accept(struct isert_conn *isert_conn)
+{
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
+ struct rdma_conn_param cp;
+ int ret;
+ struct iser_cm_hdr rsp_hdr;
+
+ memset(&cp, 0, sizeof(struct rdma_conn_param));
+ cp.initiator_depth = isert_conn->initiator_depth;
+ cp.retry_count = 7;
+ cp.rnr_retry_count = 7;
+
+ memset(&rsp_hdr, 0, sizeof(rsp_hdr));
+ rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
+ if (!isert_conn->snd_w_inv)
+		rsp_hdr.flags |= ISERT_SEND_W_INV_NOT_USED;
+ cp.private_data = (void *)&rsp_hdr;
+ cp.private_data_len = sizeof(rsp_hdr);
+
+ ret = rdma_accept(cm_id, &cp);
+ if (ret) {
+ isert_err("rdma_accept() failed with: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
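+/*
+ * Wait for a login request PDU to arrive (signalled from the CM/RX
+ * path) and, for the first PDU only, copy it into the iscsit login
+ * buffers via isert_rx_login_req().
+ */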
+static int
+isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
+{
+ struct isert_conn *isert_conn = conn->context;
+ int ret;
+
+ isert_info("before login_req comp conn: %p\n", isert_conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+ if (ret) {
+ isert_err("isert_conn %p interrupted before got login req\n",
+ isert_conn);
+ return ret;
+ }
+ reinit_completion(&isert_conn->login_req_comp);
+
+ /*
+ * For login requests after the first PDU, isert_rx_login_req() will
+ * kick queue_delayed_work(isert_login_wq, &conn->login_work) as
+ * the packet is received, which turns this callback from
+ * iscsi_target_do_login_rx() into a NOP.
+ */
+ if (!login->first_request)
+ return 0;
+
+ isert_rx_login_req(isert_conn);
+
+ isert_info("before login_comp conn: %p\n", conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_comp);
+ if (ret)
+ return ret;
+
+ isert_info("processing login->req: %p\n", login->req);
+
+ return 0;
+}
+
+static void
+isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
+ struct isert_conn *isert_conn)
+{
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
+ struct rdma_route *cm_route = &cm_id->route;
+
+ conn->login_family = np->np_sockaddr.ss_family;
+
+ conn->login_sockaddr = cm_route->addr.dst_addr;
+ conn->local_sockaddr = cm_route->addr.src_addr;
+}
+
+static int
+isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
+{
+ struct isert_np *isert_np = np->np_context;
+ struct isert_conn *isert_conn;
+ int ret;
+
+accept_wait:
+ ret = down_interruptible(&isert_np->sem);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+ isert_dbg("np_thread_state %d\n",
+ np->np_thread_state);
+ /*
+ * No point in stalling here when np_thread
+ * is in state RESET/SHUTDOWN/EXIT - bail
+ */
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ mutex_lock(&isert_np->mutex);
+ if (list_empty(&isert_np->pending)) {
+ mutex_unlock(&isert_np->mutex);
+ goto accept_wait;
+ }
+ isert_conn = list_first_entry(&isert_np->pending,
+ struct isert_conn, node);
+ list_del_init(&isert_conn->node);
+ mutex_unlock(&isert_np->mutex);
+
+ conn->context = isert_conn;
+ isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
+
+ isert_set_conn_info(np, conn, isert_conn);
+
+ isert_dbg("Processing isert_conn: %p\n", isert_conn);
+
+ return 0;
+}
+
+static void
+isert_free_np(struct iscsi_np *np)
+{
+ struct isert_np *isert_np = np->np_context;
+ struct isert_conn *isert_conn, *n;
+ LIST_HEAD(drop_conn_list);
+
+ if (isert_np->cm_id)
+ rdma_destroy_id(isert_np->cm_id);
+
+ /*
+	 * FIXME: We don't have a good way to ensure that there are no
+	 * hanging connections that completed RDMA establishment but
+	 * never started the iscsi login process. Work around this by
+	 * cleaning up whatever piled up in the accepted and pending
+	 * lists.
+ */
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_np->pending)) {
+ isert_info("Still have isert pending connections\n");
+ list_for_each_entry_safe(isert_conn, n,
+ &isert_np->pending,
+ node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
+ }
+ }
+
+ if (!list_empty(&isert_np->accepted)) {
+ isert_info("Still have isert accepted connections\n");
+ list_for_each_entry_safe(isert_conn, n,
+ &isert_np->accepted,
+ node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
+ }
+ }
+ mutex_unlock(&isert_np->mutex);
+
+ list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
+ list_del_init(&isert_conn->node);
+ isert_connect_release(isert_conn);
+ }
+
+ np->np_context = NULL;
+ kfree(isert_np);
+}
+
+static void isert_release_work(struct work_struct *work)
+{
+ struct isert_conn *isert_conn = container_of(work,
+ struct isert_conn,
+ release_work);
+
+ isert_info("Starting release conn %p\n", isert_conn);
+
+ mutex_lock(&isert_conn->mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->mutex);
+
+ isert_info("Destroying conn %p\n", isert_conn);
+ isert_put_conn(isert_conn);
+}
+
+static void
+isert_wait4logout(struct isert_conn *isert_conn)
+{
+ struct iscsit_conn *conn = isert_conn->conn;
+
+ isert_info("conn %p\n", isert_conn);
+
+ if (isert_conn->logout_posted) {
+ isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ }
+}
+
+static void
+isert_wait4cmds(struct iscsit_conn *conn)
+{
+ isert_info("iscsit_conn %p\n", conn);
+
+ if (conn->sess) {
+ target_stop_cmd_counter(conn->cmd_cnt);
+ target_wait_for_cmds(conn->cmd_cnt);
+ }
+}
+
+/**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ *     unsolicited dataout
+ * @conn: iscsi connection
+ *
+ * We might still have commands that are waiting for unsolicited
+ * dataout messages. We must put the extra reference on those
+ * before blocking on target_wait_for_cmds().
+ */
+static void
+isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
+{
+ struct iscsit_cmd *cmd, *tmp;
+ static LIST_HEAD(drop_cmd_list);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
+ if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
+ (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
+ (cmd->write_data_done < cmd->se_cmd.data_length))
+ list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+ if (cmd->i_state != ISTATE_REMOVE) {
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+
+ isert_info("conn %p dropping cmd %p\n", conn, cmd);
+ isert_put_cmd(isert_cmd, true);
+ }
+ }
+}
+
+static void isert_wait_conn(struct iscsit_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
+
+ isert_info("Starting conn %p\n", isert_conn);
+
+ mutex_lock(&isert_conn->mutex);
+ isert_conn_terminate(isert_conn);
+ mutex_unlock(&isert_conn->mutex);
+
+ ib_drain_qp(isert_conn->qp);
+ isert_put_unsol_pending_cmds(conn);
+ isert_wait4cmds(conn);
+ isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
+}
+
+static void isert_free_conn(struct iscsit_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
+
+ ib_drain_qp(isert_conn->qp);
+ isert_put_conn(isert_conn);
+}
+
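+/*
+ * RX processing is entirely completion-queue driven, so the iscsit RX
+ * thread has nothing to do; park it on a completion that is never
+ * signalled. The wait is interruptible so the thread can still be
+ * stopped.
+ */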
+static void isert_get_rx_pdu(struct iscsit_conn *conn)
+{
+ struct completion comp;
+
+ init_completion(&comp);
+
+ wait_for_completion_interruptible(&comp);
+}
+
+static struct iscsit_transport iser_target_transport = {
+ .name = "IB/iSER",
+ .transport_type = ISCSI_INFINIBAND,
+ .rdma_shutdown = true,
+ .priv_size = sizeof(struct isert_cmd),
+ .owner = THIS_MODULE,
+ .iscsit_setup_np = isert_setup_np,
+ .iscsit_accept_np = isert_accept_np,
+ .iscsit_free_np = isert_free_np,
+ .iscsit_wait_conn = isert_wait_conn,
+ .iscsit_free_conn = isert_free_conn,
+ .iscsit_get_login_rx = isert_get_login_rx,
+ .iscsit_put_login_tx = isert_put_login_tx,
+ .iscsit_immediate_queue = isert_immediate_queue,
+ .iscsit_response_queue = isert_response_queue,
+ .iscsit_get_dataout = isert_get_dataout,
+ .iscsit_queue_data_in = isert_put_datain,
+ .iscsit_queue_status = isert_put_response,
+ .iscsit_aborted_task = isert_aborted_task,
+ .iscsit_get_rx_pdu = isert_get_rx_pdu,
+ .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
+};
+
+static int __init isert_init(void)
+{
+ isert_login_wq = alloc_workqueue("isert_login_wq", 0, 0);
+ if (!isert_login_wq) {
+ isert_err("Unable to allocate isert_login_wq\n");
+ return -ENOMEM;
+ }
+
+ isert_comp_wq = alloc_workqueue("isert_comp_wq",
+ WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!isert_comp_wq) {
+ isert_err("Unable to allocate isert_comp_wq\n");
+ goto destroy_login_wq;
+ }
+
+ isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!isert_release_wq) {
+ isert_err("Unable to allocate isert_release_wq\n");
+ goto destroy_comp_wq;
+ }
+
+ iscsit_register_transport(&iser_target_transport);
+ isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
+ return 0;
+
+destroy_comp_wq:
+ destroy_workqueue(isert_comp_wq);
+destroy_login_wq:
+ destroy_workqueue(isert_login_wq);
+
+ return -ENOMEM;
+}
+
+static void __exit isert_exit(void)
+{
+ flush_workqueue(isert_login_wq);
+ destroy_workqueue(isert_release_wq);
+ destroy_workqueue(isert_comp_wq);
+ iscsit_unregister_transport(&iser_target_transport);
+ isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
+ destroy_workqueue(isert_login_wq);
+}
+
+MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(isert_init);
+module_exit(isert_exit);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
new file mode 100644
index 000000000..0b2dfd6e7
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+#include <scsi/iser.h>
+
+
+#define DRV_NAME "isert"
+#define PFX DRV_NAME ": "
+
+#define isert_dbg(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 2)) \
+ printk(KERN_DEBUG PFX "%s: " fmt,\
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_warn(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 0)) \
+ pr_warn(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_info(fmt, arg...) \
+ do { \
+ if (unlikely(isert_debug_level > 1)) \
+ pr_info(PFX "%s: " fmt, \
+ __func__ , ## arg); \
+ } while (0)
+
+#define isert_err(fmt, arg...) \
+ pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
+/* Constant PDU lengths calculations */
+#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \
+ sizeof(struct iscsi_hdr))
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+
+/* QP settings */
+/* Maximal bounds on received asynchronous PDUs */
+#define ISERT_MAX_TX_MISC_PDUS	4 /* NOOP_IN(2), ASYNC_EVENT(2) */
+
+#define ISERT_MAX_RX_MISC_PDUS 6 /*
+ * NOOP_OUT(2), TEXT(1),
+ * SCSI_TMFUNC(2), LOGOUT(1)
+ */
+
+#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */
+
+#define ISERT_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+
+#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
+ ISERT_MAX_TX_MISC_PDUS + \
+ ISERT_MAX_RX_MISC_PDUS)
+
+/*
+ * RX size is the default of 8k plus headers, but the data needs to
+ * align to a 512-byte boundary, so use 1024 to leave extra space for
+ * alignment.
+ */
+#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
+
+/* Minimum I/O size is 512KB */
+#define ISCSI_ISER_MIN_SG_TABLESIZE 128
+
+/* Maximum support is 16MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE 4096
+
+enum isert_desc_type {
+ ISCSI_TX_CONTROL,
+ ISCSI_TX_DATAIN
+};
+
+enum iser_conn_state {
+ ISER_CONN_INIT,
+ ISER_CONN_UP,
+ ISER_CONN_BOUND,
+ ISER_CONN_FULL_FEATURE,
+ ISER_CONN_TERMINATING,
+ ISER_CONN_DOWN,
+};
+
+struct iser_rx_desc {
+ char buf[ISER_RX_SIZE];
+ u64 dma_addr;
+ struct ib_sge rx_sg;
+ struct ib_cqe rx_cqe;
+ bool in_use;
+};
+
+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
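+/*
+ * The headers are placed in the RX buffer such that the data payload
+ * following them starts on a 512-byte boundary (see isert_get_data()).
+ */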
+static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
+{
+ return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
+}
+
+static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) - (void *)desc->buf;
+}
+
+static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
+}
+
+static void *isert_get_data(struct iser_rx_desc *desc)
+{
+ void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
+
+ WARN_ON((uintptr_t)data & 511);
+ return data;
+}
+
+struct iser_tx_desc {
+ struct iser_ctrl iser_header;
+ struct iscsi_hdr iscsi_header;
+ enum isert_desc_type type;
+ u64 dma_addr;
+ struct ib_sge tx_sg[2];
+ struct ib_cqe tx_cqe;
+ int num_sge;
+ struct ib_send_wr send_wr;
+} __packed;
+
+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
+
+struct isert_cmd {
+ uint32_t read_stag;
+ uint32_t write_stag;
+ uint64_t read_va;
+ uint64_t write_va;
+ uint32_t inv_rkey;
+ u64 pdu_buf_dma;
+ u32 pdu_buf_len;
+ struct isert_conn *conn;
+ struct iscsit_cmd *iscsit_cmd;
+ struct iser_tx_desc tx_desc;
+ struct iser_rx_desc *rx_desc;
+ struct rdma_rw_ctx rw;
+ struct work_struct comp_work;
+ struct scatterlist sg;
+ bool ctx_init_done;
+};
+
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+ return container_of(desc, struct isert_cmd, tx_desc);
+}
+
+struct isert_device;
+
+struct isert_conn {
+ enum iser_conn_state state;
+ u32 responder_resources;
+ u32 initiator_depth;
+ bool pi_support;
+ struct iser_rx_desc *login_desc;
+ char *login_rsp_buf;
+ int login_req_len;
+ u64 login_rsp_dma;
+ struct iser_rx_desc *rx_descs;
+ struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS];
+ struct iscsit_conn *conn;
+ struct list_head node;
+ struct completion login_comp;
+ struct completion login_req_comp;
+ struct iser_tx_desc login_tx_desc;
+ struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ u32 cq_size;
+ struct isert_device *device;
+ struct mutex mutex;
+ struct kref kref;
+ struct work_struct release_work;
+ bool logout_posted;
+ bool snd_w_inv;
+ wait_queue_head_t rem_wait;
+ bool dev_removed;
+};
+
+struct isert_device {
+ bool pi_capable;
+ int refcount;
+ struct ib_device *ib_device;
+ struct ib_pd *pd;
+ struct isert_comp *comps;
+ int comps_used;
+ struct list_head dev_node;
+};
+
+struct isert_np {
+ struct iscsi_np *np;
+ struct semaphore sem;
+ struct rdma_cm_id *cm_id;
+ struct mutex mutex;
+ struct list_head accepted;
+ struct list_head pending;
+};
diff --git a/drivers/infiniband/ulp/opa_vnic/Kconfig b/drivers/infiniband/ulp/opa_vnic/Kconfig
new file mode 100644
index 000000000..4d43d055f
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_OPA_VNIC
+ tristate "Cornelis OPX VNIC support"
+ depends on X86_64 && INFINIBAND
+ help
+ This is Omni-Path Express (OPX) Virtual Network Interface Controller (VNIC)
+ driver for Ethernet over Omni-Path feature. It implements the HW
+ independent VNIC functionality. It interfaces with Linux stack for
+ data path and IB MAD for the control path.
diff --git a/drivers/infiniband/ulp/opa_vnic/Makefile b/drivers/infiniband/ulp/opa_vnic/Makefile
new file mode 100644
index 000000000..196183817
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile - Cornelis Omni-Path Express Virtual Network Controller driver
+# Copyright(c) 2017, Intel Corporation.
+# Copyright(c) 2021, Cornelis Networks.
+#
+obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic.o
+
+opa_vnic-y := opa_vnic_netdev.o opa_vnic_encap.o opa_vnic_ethtool.o \
+ opa_vnic_vema.o opa_vnic_vema_iface.o
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
new file mode 100644
index 000000000..31cd36141
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains the OPA VNIC encapsulation/decapsulation functions.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include "opa_vnic_internal.h"
+
+/* OPA 16B Header fields */
+#define OPA_16B_LID_MASK 0xFFFFFull
+#define OPA_16B_SLID_HIGH_SHFT 8
+#define OPA_16B_SLID_MASK 0xF00ull
+#define OPA_16B_DLID_MASK 0xF000ull
+#define OPA_16B_DLID_HIGH_SHFT 12
+#define OPA_16B_LEN_SHFT 20
+#define OPA_16B_SC_SHFT 20
+#define OPA_16B_RC_SHFT 25
+#define OPA_16B_PKEY_SHFT 16
+
+#define OPA_VNIC_L4_HDR_SHFT 16
+
+/* L2+L4 hdr len is 20 bytes (5 quad words) */
+#define OPA_VNIC_HDR_QW_LEN 5
+
+static inline void opa_vnic_make_header(u8 *hdr, u32 slid, u32 dlid, u16 len,
+ u16 pkey, u16 entropy, u8 sc, u8 rc,
+ u8 l4_type, u16 l4_hdr)
+{
+ /* h[1]: LT=1, 16B L2=10 */
+ u32 h[OPA_VNIC_HDR_QW_LEN] = {0, 0xc0000000, 0, 0, 0};
+
+ h[2] = l4_type;
+ h[3] = entropy;
+ h[4] = l4_hdr << OPA_VNIC_L4_HDR_SHFT;
+
+ /* Extract and set 4 upper bits and 20 lower bits of the lids */
+ h[0] |= (slid & OPA_16B_LID_MASK);
+ h[2] |= ((slid >> (20 - OPA_16B_SLID_HIGH_SHFT)) & OPA_16B_SLID_MASK);
+
+ h[1] |= (dlid & OPA_16B_LID_MASK);
+ h[2] |= ((dlid >> (20 - OPA_16B_DLID_HIGH_SHFT)) & OPA_16B_DLID_MASK);
+
+ h[0] |= (len << OPA_16B_LEN_SHFT);
+ h[1] |= (rc << OPA_16B_RC_SHFT);
+ h[1] |= (sc << OPA_16B_SC_SHFT);
+ h[2] |= ((u32)pkey << OPA_16B_PKEY_SHFT);
+
+ memcpy(hdr, h, OPA_VNIC_HDR_LEN);
+}
+
+/*
+ * Using a simple hash table for mac table implementation with the last octet
+ * of mac address as a key.
+ */
+static void opa_vnic_free_mac_tbl(struct hlist_head *mactbl)
+{
+ struct opa_vnic_mac_tbl_node *node;
+ struct hlist_node *tmp;
+ int bkt;
+
+ if (!mactbl)
+ return;
+
+ vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) {
+ hash_del(&node->hlist);
+ kfree(node);
+ }
+ kfree(mactbl);
+}
+
+static struct hlist_head *opa_vnic_alloc_mac_tbl(void)
+{
+ u32 size = sizeof(struct hlist_head) * OPA_VNIC_MAC_TBL_SIZE;
+ struct hlist_head *mactbl;
+
+ mactbl = kzalloc(size, GFP_KERNEL);
+ if (!mactbl)
+ return ERR_PTR(-ENOMEM);
+
+ vnic_hash_init(mactbl);
+ return mactbl;
+}
+
+/* opa_vnic_release_mac_tbl - empty and free the mac table */
+void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter)
+{
+ struct hlist_head *mactbl;
+
+ mutex_lock(&adapter->mactbl_lock);
+ mactbl = rcu_access_pointer(adapter->mactbl);
+ rcu_assign_pointer(adapter->mactbl, NULL);
+ synchronize_rcu();
+ opa_vnic_free_mac_tbl(mactbl);
+ adapter->info.vport.mac_tbl_digest = 0;
+ mutex_unlock(&adapter->mactbl_lock);
+}
+
+/*
+ * opa_vnic_query_mac_tbl - query the mac table for a section
+ *
+ * This function implements a query of a specific section of the mac table.
+ * The function also expects the requested range to be valid.
+ */
+void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl)
+{
+ struct opa_vnic_mac_tbl_node *node;
+ struct hlist_head *mactbl;
+ int bkt;
+ u16 loffset, lnum_entries;
+
+ rcu_read_lock();
+ mactbl = rcu_dereference(adapter->mactbl);
+ if (!mactbl)
+ goto get_mac_done;
+
+ loffset = be16_to_cpu(tbl->offset);
+ lnum_entries = be16_to_cpu(tbl->num_entries);
+
+ vnic_hash_for_each(mactbl, bkt, node, hlist) {
+ struct __opa_vnic_mactable_entry *nentry = &node->entry;
+ struct opa_veswport_mactable_entry *entry;
+
+ if ((node->index < loffset) ||
+ (node->index >= (loffset + lnum_entries)))
+ continue;
+
+ /* populate entry in the tbl corresponding to the index */
+ entry = &tbl->tbl_entries[node->index - loffset];
+ memcpy(entry->mac_addr, nentry->mac_addr,
+ ARRAY_SIZE(entry->mac_addr));
+ memcpy(entry->mac_addr_mask, nentry->mac_addr_mask,
+ ARRAY_SIZE(entry->mac_addr_mask));
+ entry->dlid_sd = cpu_to_be32(nentry->dlid_sd);
+ }
+ tbl->mac_tbl_digest = cpu_to_be32(adapter->info.vport.mac_tbl_digest);
+get_mac_done:
+ rcu_read_unlock();
+}
+
+/*
+ * opa_vnic_update_mac_tbl - update mac table section
+ *
+ * This function updates the specified section of the mac table.
+ * The procedure includes following steps.
+ * - Allocate a new mac (hash) table.
+ * - Add the specified entries to the new table.
+ * (except the ones that are requested to be deleted).
+ * - Add all the other entries from the old mac table.
+ * - If there is a failure, free the new table and return.
+ * - Switch to the new table.
+ * - Free the old table and return.
+ *
+ * The function also expects the requested range to be valid.
+ */
+int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl)
+{
+ struct opa_vnic_mac_tbl_node *node, *new_node;
+ struct hlist_head *new_mactbl, *old_mactbl;
+ int i, bkt, rc = 0;
+ u8 key;
+ u16 loffset, lnum_entries;
+
+ mutex_lock(&adapter->mactbl_lock);
+ /* allocate new mac table */
+ new_mactbl = opa_vnic_alloc_mac_tbl();
+ if (IS_ERR(new_mactbl)) {
+ mutex_unlock(&adapter->mactbl_lock);
+ return PTR_ERR(new_mactbl);
+ }
+
+ loffset = be16_to_cpu(tbl->offset);
+ lnum_entries = be16_to_cpu(tbl->num_entries);
+
+ /* add updated entries to the new mac table */
+ for (i = 0; i < lnum_entries; i++) {
+ struct __opa_vnic_mactable_entry *nentry;
+ struct opa_veswport_mactable_entry *entry =
+ &tbl->tbl_entries[i];
+ u8 *mac_addr = entry->mac_addr;
+ u8 empty_mac[ETH_ALEN] = { 0 };
+
+ v_dbg("new mac entry %4d: %02x:%02x:%02x:%02x:%02x:%02x %x\n",
+ loffset + i, mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5],
+ entry->dlid_sd);
+
+ /* if the entry is being removed, do not add it */
+ if (!memcmp(mac_addr, empty_mac, ARRAY_SIZE(empty_mac)))
+ continue;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ rc = -ENOMEM;
+ goto updt_done;
+ }
+
+ node->index = loffset + i;
+ nentry = &node->entry;
+ memcpy(nentry->mac_addr, entry->mac_addr,
+ ARRAY_SIZE(nentry->mac_addr));
+ memcpy(nentry->mac_addr_mask, entry->mac_addr_mask,
+ ARRAY_SIZE(nentry->mac_addr_mask));
+ nentry->dlid_sd = be32_to_cpu(entry->dlid_sd);
+ key = node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
+ vnic_hash_add(new_mactbl, &node->hlist, key);
+ }
+
+ /* add other entries from current mac table to new mac table */
+ old_mactbl = rcu_access_pointer(adapter->mactbl);
+ if (!old_mactbl)
+ goto switch_tbl;
+
+ vnic_hash_for_each(old_mactbl, bkt, node, hlist) {
+ if ((node->index >= loffset) &&
+ (node->index < (loffset + lnum_entries)))
+ continue;
+
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node) {
+ rc = -ENOMEM;
+ goto updt_done;
+ }
+
+ new_node->index = node->index;
+ memcpy(&new_node->entry, &node->entry, sizeof(node->entry));
+ key = new_node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
+ vnic_hash_add(new_mactbl, &new_node->hlist, key);
+ }
+
+switch_tbl:
+ /* switch to new table */
+ rcu_assign_pointer(adapter->mactbl, new_mactbl);
+ synchronize_rcu();
+
+ adapter->info.vport.mac_tbl_digest = be32_to_cpu(tbl->mac_tbl_digest);
+updt_done:
+ /* upon failure, free the new table; otherwise, free the old table */
+ if (rc)
+ opa_vnic_free_mac_tbl(new_mactbl);
+ else
+ opa_vnic_free_mac_tbl(old_mactbl);
+
+ mutex_unlock(&adapter->mactbl_lock);
+ return rc;
+}
+
+/* opa_vnic_chk_mac_tbl - check mac table for dlid */
+static uint32_t opa_vnic_chk_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct ethhdr *mac_hdr)
+{
+ struct opa_vnic_mac_tbl_node *node;
+ struct hlist_head *mactbl;
+ u32 dlid = 0;
+ u8 key;
+
+ rcu_read_lock();
+ mactbl = rcu_dereference(adapter->mactbl);
+ if (unlikely(!mactbl))
+ goto chk_done;
+
+ key = mac_hdr->h_dest[OPA_VNIC_MAC_HASH_IDX];
+ vnic_hash_for_each_possible(mactbl, node, hlist, key) {
+ struct __opa_vnic_mactable_entry *entry = &node->entry;
+
+ /* if related to source mac, skip */
+ if (unlikely(OPA_VNIC_DLID_SD_IS_SRC_MAC(entry->dlid_sd)))
+ continue;
+
+ if (!memcmp(node->entry.mac_addr, mac_hdr->h_dest,
+ ARRAY_SIZE(node->entry.mac_addr))) {
+ /* mac address found */
+ dlid = OPA_VNIC_DLID_SD_GET_DLID(node->entry.dlid_sd);
+ break;
+ }
+ }
+
+chk_done:
+ rcu_read_unlock();
+ return dlid;
+}
+
+/* opa_vnic_get_dlid - find and return the DLID */
+static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
+ struct sk_buff *skb, u8 def_port)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u32 dlid;
+
+ dlid = opa_vnic_chk_mac_tbl(adapter, mac_hdr);
+ if (dlid)
+ return dlid;
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest)) {
+ dlid = info->vesw.u_mcast_dlid;
+ } else {
+ if (is_local_ether_addr(mac_hdr->h_dest)) {
+ dlid = ((uint32_t)mac_hdr->h_dest[5] << 16) |
+ ((uint32_t)mac_hdr->h_dest[4] << 8) |
+ mac_hdr->h_dest[3];
+ if (unlikely(!dlid))
+ v_warn("Null dlid in MAC address\n");
+ } else if (def_port != OPA_VNIC_INVALID_PORT) {
+ if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
+ dlid = info->vesw.u_ucast_dlid[def_port];
+ }
+ }
+
+ return dlid;
+}
+
+/* opa_vnic_get_sc - return the service class */
+static u8 opa_vnic_get_sc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ u16 vlan_tci;
+ u8 sc;
+
+ if (!__vlan_get_tag(skb, &vlan_tci)) {
+ u8 pcp = OPA_VNIC_VLAN_PCP(vlan_tci);
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ sc = info->vport.pcp_to_sc_mc[pcp];
+ else
+ sc = info->vport.pcp_to_sc_uc[pcp];
+ } else {
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ sc = info->vport.non_vlan_sc_mc;
+ else
+ sc = info->vport.non_vlan_sc_uc;
+ }
+
+ return sc;
+}
+
+u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
+{
+ struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+ struct __opa_veswport_info *info = &adapter->info;
+ u8 vl;
+
+ if (skb_vlan_tag_present(skb)) {
+ u8 pcp = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
+
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ vl = info->vport.pcp_to_vl_mc[pcp];
+ else
+ vl = info->vport.pcp_to_vl_uc[pcp];
+ } else {
+ if (is_multicast_ether_addr(mac_hdr->h_dest))
+ vl = info->vport.non_vlan_vl_mc;
+ else
+ vl = info->vport.non_vlan_vl_uc;
+ }
+
+ return vl;
+}
+
+/* opa_vnic_get_rc - return the routing control */
+static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ u8 proto, rout_ctrl;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ proto = ipv6_hdr(skb)->nexthdr;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6);
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4);
+ break;
+ default:
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT);
+ }
+
+ return rout_ctrl;
+}
+
+/* opa_vnic_calc_entropy - calculate the packet entropy */
+u8 opa_vnic_calc_entropy(struct sk_buff *skb)
+{
+ u32 hash = skb_get_hash(skb);
+
+ /* store XOR of all bytes in lower 8 bits */
+ hash ^= hash >> 8;
+ hash ^= hash >> 16;
+
+ /* return lower 8 bits as entropy */
+ return (u8)(hash & 0xFF);
+}
+
+/* opa_vnic_get_def_port - get default port based on entropy */
+static inline u8 opa_vnic_get_def_port(struct opa_vnic_adapter *adapter,
+ u8 entropy)
+{
+ u8 flow_id;
+
+	/* Add the upper and lower 4 bits of entropy to get the flow id */
+ flow_id = ((entropy & 0xf) + (entropy >> 4));
+ return adapter->flow_tbl[flow_id & (OPA_VNIC_FLOW_TBL_SIZE - 1)];
+}
+
+/* Calculate packet length, in 8-byte words, including OPA header, crc and padding */
+static inline int opa_vnic_wire_length(struct sk_buff *skb)
+{
+ u32 pad_len;
+
+ /* padding for 8 bytes size alignment */
+ pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
+ pad_len += OPA_VNIC_ICRC_TAIL_LEN;
+
+ return (skb->len + pad_len) >> 3;
+}
+
+/* opa_vnic_encap_skb - encapsulate skb packet with OPA header and meta data */
+void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct opa_vnic_skb_mdata *mdata;
+ u8 def_port, sc, rc, entropy, *hdr;
+ u16 len, l4_hdr;
+ u32 dlid;
+
+ hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
+
+ entropy = opa_vnic_calc_entropy(skb);
+ def_port = opa_vnic_get_def_port(adapter, entropy);
+ len = opa_vnic_wire_length(skb);
+ dlid = opa_vnic_get_dlid(adapter, skb, def_port);
+ sc = opa_vnic_get_sc(info, skb);
+ rc = opa_vnic_get_rc(info, skb);
+ l4_hdr = info->vesw.vesw_id;
+
+ mdata = skb_push(skb, sizeof(*mdata));
+ mdata->vl = opa_vnic_get_vl(adapter, skb);
+ mdata->entropy = entropy;
+ mdata->flags = 0;
+ if (unlikely(!dlid)) {
+ mdata->flags = OPA_VNIC_SKB_MDATA_ENCAP_ERR;
+ return;
+ }
+
+ opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len,
+ info->vesw.pkey, entropy, sc, rc,
+ OPA_VNIC_L4_ETHR, l4_hdr);
+}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
new file mode 100644
index 000000000..012fc27c5
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -0,0 +1,524 @@
+#ifndef _OPA_VNIC_ENCAP_H
+#define _OPA_VNIC_ENCAP_H
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains all OPA VNIC declarations required for encapsulation
+ * and decapsulation of Ethernet packets.
+ */
+
+#include <linux/types.h>
+#include <rdma/ib_mad.h>
+
+/* EMA class version */
+#define OPA_EMA_CLASS_VERSION 0x80
+
+/*
+ * Define the Intel vendor management class for OPA
+ * ETHERNET MANAGEMENT
+ */
+#define OPA_MGMT_CLASS_INTEL_EMA 0x34
+
+/* EM attribute IDs */
+#define OPA_EM_ATTR_CLASS_PORT_INFO 0x0001
+#define OPA_EM_ATTR_VESWPORT_INFO 0x0011
+#define OPA_EM_ATTR_VESWPORT_MAC_ENTRIES 0x0012
+#define OPA_EM_ATTR_IFACE_UCAST_MACS 0x0013
+#define OPA_EM_ATTR_IFACE_MCAST_MACS 0x0014
+#define OPA_EM_ATTR_DELETE_VESW 0x0015
+#define OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS 0x0020
+#define OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS 0x0022
+
+/* VNIC configured and operational state values */
+#define OPA_VNIC_STATE_DROP_ALL 0x1
+#define OPA_VNIC_STATE_FORWARDING 0x3
+
+#define OPA_VESW_MAX_NUM_DEF_PORT 16
+#define OPA_VNIC_MAX_NUM_PCP 8
+
+#define OPA_VNIC_EMA_DATA (OPA_MGMT_MAD_SIZE - IB_MGMT_VENDOR_HDR)
+
+/* Defines for vendor specific notice(trap) attributes */
+#define OPA_INTEL_EMA_NOTICE_TYPE_INFO 0x04
+
+/* INTEL OUI */
+#define INTEL_OUI_1 0x00
+#define INTEL_OUI_2 0x06
+#define INTEL_OUI_3 0x6a
+
+/* Trap opcodes sent from VNIC */
+#define OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE 0x1
+#define OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE 0x2
+#define OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE 0x3
+
+#define OPA_VNIC_DLID_SD_IS_SRC_MAC(dlid_sd) (!!((dlid_sd) & 0x20))
+#define OPA_VNIC_DLID_SD_GET_DLID(dlid_sd) ((dlid_sd) >> 8)
+
+/* VNIC Ethernet link status */
+#define OPA_VNIC_ETH_LINK_UP 1
+#define OPA_VNIC_ETH_LINK_DOWN 2
+
+/* routing control */
+#define OPA_VNIC_ENCAP_RC_DEFAULT 0
+#define OPA_VNIC_ENCAP_RC_IPV4 4
+#define OPA_VNIC_ENCAP_RC_IPV4_UDP 8
+#define OPA_VNIC_ENCAP_RC_IPV4_TCP 12
+#define OPA_VNIC_ENCAP_RC_IPV6 16
+#define OPA_VNIC_ENCAP_RC_IPV6_TCP 20
+#define OPA_VNIC_ENCAP_RC_IPV6_UDP 24
+
+#define OPA_VNIC_ENCAP_RC_EXT(w, b) (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7)
+
+/**
+ * struct opa_vesw_info - OPA vnic switch information
+ * @fabric_id: 10-bit fabric id
+ * @vesw_id: 12-bit virtual ethernet switch id
+ * @rsvd0: reserved bytes
+ * @def_port_mask: bitmask of default ports
+ * @rsvd1: reserved bytes
+ * @pkey: partition key
+ * @rsvd2: reserved bytes
+ * @u_mcast_dlid: unknown multicast dlid
+ * @u_ucast_dlid: array of unknown unicast dlids
+ * @rsvd3: reserved bytes
+ * @rc: routing control
+ * @eth_mtu: Ethernet MTU
+ * @rsvd4: reserved bytes
+ */
+struct opa_vesw_info {
+ __be16 fabric_id;
+ __be16 vesw_id;
+
+ u8 rsvd0[6];
+ __be16 def_port_mask;
+
+ u8 rsvd1[2];
+ __be16 pkey;
+
+ u8 rsvd2[4];
+ __be32 u_mcast_dlid;
+ __be32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
+
+ __be32 rc;
+
+ u8 rsvd3[56];
+ __be16 eth_mtu;
+ u8 rsvd4[2];
+} __packed;
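+
+/*
+ * Note: all multi-byte fields in these MAD structures are big endian on
+ * the wire and must be converted before use on the host. A minimal
+ * sketch, assuming vesw points at a received struct opa_vesw_info:
+ *
+ *	u16 vesw_id = be16_to_cpu(vesw->vesw_id);
+ *	u16 eth_mtu = be16_to_cpu(vesw->eth_mtu);
+ */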
+
+/**
+ * struct opa_per_veswport_info - OPA vnic per port information
+ * @port_num: port number
+ * @eth_link_status: current ethernet link state
+ * @rsvd0: reserved bytes
+ * @base_mac_addr: base mac address
+ * @config_state: configured port state
+ * @oper_state: operational port state
+ * @max_mac_tbl_ent: max number of mac table entries
+ * @max_smac_ent: max smac entries in mac table
+ * @mac_tbl_digest: mac table digest
+ * @rsvd1: reserved bytes
+ * @encap_slid: base slid for the port
+ * @pcp_to_sc_uc: sc by pcp index for unicast ethernet packets
+ * @pcp_to_vl_uc: vl by pcp index for unicast ethernet packets
+ * @pcp_to_sc_mc: sc by pcp index for multicast ethernet packets
+ * @pcp_to_vl_mc: vl by pcp index for multicast ethernet packets
+ * @non_vlan_sc_uc: sc for non-vlan unicast ethernet packets
+ * @non_vlan_vl_uc: vl for non-vlan unicast ethernet packets
+ * @non_vlan_sc_mc: sc for non-vlan multicast ethernet packets
+ * @non_vlan_vl_mc: vl for non-vlan multicast ethernet packets
+ * @rsvd2: reserved bytes
+ * @uc_macs_gen_count: generation count for unicast macs list
+ * @mc_macs_gen_count: generation count for multicast macs list
+ * @rsvd3: reserved bytes
+ */
+struct opa_per_veswport_info {
+ __be32 port_num;
+
+ u8 eth_link_status;
+ u8 rsvd0[3];
+
+ u8 base_mac_addr[ETH_ALEN];
+ u8 config_state;
+ u8 oper_state;
+
+ __be16 max_mac_tbl_ent;
+ __be16 max_smac_ent;
+ __be32 mac_tbl_digest;
+ u8 rsvd1[4];
+
+ __be32 encap_slid;
+
+ u8 pcp_to_sc_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_sc_mc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_mc[OPA_VNIC_MAX_NUM_PCP];
+
+ u8 non_vlan_sc_uc;
+ u8 non_vlan_vl_uc;
+ u8 non_vlan_sc_mc;
+ u8 non_vlan_vl_mc;
+
+ u8 rsvd2[48];
+
+ __be16 uc_macs_gen_count;
+ __be16 mc_macs_gen_count;
+
+ u8 rsvd3[8];
+} __packed;
+
+/**
+ * struct opa_veswport_info - OPA vnic port information
+ * @vesw: OPA vnic switch information
+ * @vport: OPA vnic per port information
+ *
+ * On the host, each virtual ethernet port belongs
+ * to a different virtual ethernet switch.
+ */
+struct opa_veswport_info {
+ struct opa_vesw_info vesw;
+ struct opa_per_veswport_info vport;
+};
+
+/**
+ * struct opa_veswport_mactable_entry - single entry in the forwarding table
+ * @mac_addr: MAC address
+ * @mac_addr_mask: MAC address bit mask
+ * @dlid_sd: Matching DLID and side data
+ *
+ * On the host each virtual ethernet port will have
+ * a forwarding table. These tables are used to
+ * map a MAC to a LID and other data. For more
+ * details see struct opa_veswport_mactable.
+ * This is the structure of a single mactable entry.
+ */
+struct opa_veswport_mactable_entry {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+ __be32 dlid_sd;
+} __packed;
+
+/**
+ * struct opa_veswport_mactable - Forwarding table array
+ * @offset: mac table starting offset
+ * @num_entries: Number of entries to get or set
+ * @mac_tbl_digest: mac table digest
+ * @tbl_entries: Array of table entries
+ *
+ * The EM sends down this structure in a MAD, indicating
+ * the starting offset in the forwarding table that this
+ * MAD instance's entries are to be loaded into and the
+ * number of entries it contains.
+ * The mac_tbl_digest is set by the EM and is used by the EM to check
+ * for discrepancies between this value and the value it maintains,
+ * for instance when a VNIC port has been deleted or unloaded.
+ * A new instantiation of a VNIC always has a digest of zero.
+ * This value is stored as part of the vnic adapter structure and is
+ * accessed by the GET and SET routines for both the mactable entries
+ * and the veswport info.
+ */
+struct opa_veswport_mactable {
+ __be16 offset;
+ __be16 num_entries;
+ __be32 mac_tbl_digest;
+ struct opa_veswport_mactable_entry tbl_entries[];
+} __packed;
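+
+/*
+ * Illustrative sketch: since tbl_entries[] is a flexible array member,
+ * the byte size of a mactable payload carrying n entries can be
+ * computed with struct_size() from <linux/overflow.h>:
+ *
+ *	size_t len = struct_size(tbl, tbl_entries, n);
+ */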
+
+/**
+ * struct opa_veswport_summary_counters - summary counters
+ * @vp_instance: vport instance on the OPA port
+ * @vesw_id: virtual ethernet switch id
+ * @veswport_num: virtual ethernet switch port number
+ * @tx_errors: transmit errors
+ * @rx_errors: receive errors
+ * @tx_packets: transmit packets
+ * @rx_packets: receive packets
+ * @tx_bytes: transmit bytes
+ * @rx_bytes: receive bytes
+ * @tx_unicast: unicast packets transmitted
+ * @tx_mcastbcast: multicast/broadcast packets transmitted
+ * @tx_untagged: non-vlan packets transmitted
+ * @tx_vlan: vlan packets transmitted
+ * @tx_64_size: transmit packet length is 64 bytes
+ * @tx_65_127: transmit packet length is >= 65 and <= 127 bytes
+ * @tx_128_255: transmit packet length is >= 128 and <= 255 bytes
+ * @tx_256_511: transmit packet length is >= 256 and <= 511 bytes
+ * @tx_512_1023: transmit packet length is >= 512 and <= 1023 bytes
+ * @tx_1024_1518: transmit packet length is >= 1024 and <= 1518 bytes
+ * @tx_1519_max: transmit packet length >= 1519 bytes
+ * @rx_unicast: unicast packets received
+ * @rx_mcastbcast: multicast/broadcast packets received
+ * @rx_untagged: non-vlan packets received
+ * @rx_vlan: vlan packets received
+ * @rx_64_size: received packet length is 64 bytes
+ * @rx_65_127: received packet length is >= 65 and <= 127 bytes
+ * @rx_128_255: received packet length is >= 128 and <= 255 bytes
+ * @rx_256_511: received packet length is >= 256 and <= 511 bytes
+ * @rx_512_1023: received packet length is >= 512 and <= 1023 bytes
+ * @rx_1024_1518: received packet length is >= 1024 and <= 1518 bytes
+ * @rx_1519_max: received packet length >= 1519 bytes
+ * @reserved: reserved bytes
+ *
+ * All the above are counters of corresponding conditions.
+ */
+struct opa_veswport_summary_counters {
+ __be16 vp_instance;
+ __be16 vesw_id;
+ __be32 veswport_num;
+
+ __be64 tx_errors;
+ __be64 rx_errors;
+ __be64 tx_packets;
+ __be64 rx_packets;
+ __be64 tx_bytes;
+ __be64 rx_bytes;
+
+ __be64 tx_unicast;
+ __be64 tx_mcastbcast;
+
+ __be64 tx_untagged;
+ __be64 tx_vlan;
+
+ __be64 tx_64_size;
+ __be64 tx_65_127;
+ __be64 tx_128_255;
+ __be64 tx_256_511;
+ __be64 tx_512_1023;
+ __be64 tx_1024_1518;
+ __be64 tx_1519_max;
+
+ __be64 rx_unicast;
+ __be64 rx_mcastbcast;
+
+ __be64 rx_untagged;
+ __be64 rx_vlan;
+
+ __be64 rx_64_size;
+ __be64 rx_65_127;
+ __be64 rx_128_255;
+ __be64 rx_256_511;
+ __be64 rx_512_1023;
+ __be64 rx_1024_1518;
+ __be64 rx_1519_max;
+
+ __be64 reserved[16];
+} __packed;
+
+/**
+ * struct opa_veswport_error_counters - error counters
+ * @vp_instance: vport instance on the OPA port
+ * @vesw_id: virtual ethernet switch id
+ * @veswport_num: virtual ethernet switch port number
+ * @tx_errors: transmit errors
+ * @rx_errors: receive errors
+ * @rsvd0: reserved bytes
+ * @tx_smac_filt: smac filter errors
+ * @rsvd1: reserved bytes
+ * @rsvd2: reserved bytes
+ * @rsvd3: reserved bytes
+ * @tx_dlid_zero: transmit packets with invalid dlid
+ * @rsvd4: reserved bytes
+ * @tx_logic: other transmit errors
+ * @rsvd5: reserved bytes
+ * @tx_drop_state: packet transmission in non-forwarding port state
+ * @rx_bad_veswid: received packet with invalid vesw id
+ * @rsvd6: reserved bytes
+ * @rx_runt: received ethernet packet with length < 64 bytes
+ * @rx_oversize: received ethernet packet with length > MTU size
+ * @rsvd7: reserved bytes
+ * @rx_eth_down: received packets when interface is down
+ * @rx_drop_state: received packets in non-forwarding port state
+ * @rx_logic: other receive errors
+ * @rsvd8: reserved bytes
+ * @rsvd9: reserved bytes
+ *
+ * All the above are counters of corresponding error conditions.
+ */
+struct opa_veswport_error_counters {
+ __be16 vp_instance;
+ __be16 vesw_id;
+ __be32 veswport_num;
+
+ __be64 tx_errors;
+ __be64 rx_errors;
+
+ __be64 rsvd0;
+ __be64 tx_smac_filt;
+ __be64 rsvd1;
+ __be64 rsvd2;
+ __be64 rsvd3;
+ __be64 tx_dlid_zero;
+ __be64 rsvd4;
+ __be64 tx_logic;
+ __be64 rsvd5;
+ __be64 tx_drop_state;
+
+ __be64 rx_bad_veswid;
+ __be64 rsvd6;
+ __be64 rx_runt;
+ __be64 rx_oversize;
+ __be64 rsvd7;
+ __be64 rx_eth_down;
+ __be64 rx_drop_state;
+ __be64 rx_logic;
+ __be64 rsvd8;
+
+ __be64 rsvd9[16];
+} __packed;
+
+/**
+ * struct opa_veswport_trap - Trap message sent to EM by VNIC
+ * @fabric_id: 10 bit fabric id
+ * @veswid: 12 bit virtual ethernet switch id
+ * @veswportnum: logical port number on the Virtual switch
+ * @opaportnum: physical port num (redundant on host)
+ * @veswportindex: switch port index on the opa port (0 based)
+ * @opcode: operation
+ * @reserved: 32 bit for alignment
+ *
+ * The VNIC will send trap messages to the Ethernet manager to
+ * inform it about changes to the VNIC config, behaviour etc.
+ * This is the format of the trap payload.
+ */
+struct opa_veswport_trap {
+ __be16 fabric_id;
+ __be16 veswid;
+ __be32 veswportnum;
+ __be16 opaportnum;
+ u8 veswportindex;
+ u8 opcode;
+ __be32 reserved;
+} __packed;
+
+/**
+ * struct opa_vnic_iface_mac_entry - single entry in the mac list
+ * @mac_addr: MAC address
+ */
+struct opa_vnic_iface_mac_entry {
+ u8 mac_addr[ETH_ALEN];
+};
+
+/**
+ * struct opa_veswport_iface_macs - Msg to set globally administered MAC
+ * @start_idx: position of first entry (0 based)
+ * @num_macs_in_msg: number of MACs in this message
+ * @tot_macs_in_lst: The total number of MACs the agent has
+ * @gen_count: gen_count to indicate change
+ * @entry: The mac list entry
+ *
+ * Same attribute IDS and attribute modifiers as in locally administered
+ * addresses used to set globally administered addresses
+ */
+struct opa_veswport_iface_macs {
+ __be16 start_idx;
+ __be16 num_macs_in_msg;
+ __be16 tot_macs_in_lst;
+ __be16 gen_count;
+ struct opa_vnic_iface_mac_entry entry[];
+} __packed;
+
+/**
+ * struct opa_vnic_vema_mad - Generic VEMA MAD
+ * @mad_hdr: Generic MAD header
+ * @rmpp_hdr: RMPP header for vendor specific MADs
+ * @reserved: reserved bytes
+ * @oui: Unique org identifier
+ * @data: MAD data
+ */
+struct opa_vnic_vema_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 reserved;
+ u8 oui[3];
+ u8 data[OPA_VNIC_EMA_DATA];
+};
+
+/**
+ * struct opa_vnic_notice_attr - Generic Notice MAD
+ * @gen_type: Generic/Specific bit and type of notice
+ * @oui_1: Vendor ID byte 1
+ * @oui_2: Vendor ID byte 2
+ * @oui_3: Vendor ID byte 3
+ * @trap_num: Trap number
+ * @toggle_count: Notice toggle bit and count value
+ * @issuer_lid: Trap issuer's lid
+ * @reserved: reserved bytes
+ * @issuer_gid: Issuer GID (only if Report method)
+ * @raw_data: Trap message body
+ */
+struct opa_vnic_notice_attr {
+ u8 gen_type;
+ u8 oui_1;
+ u8 oui_2;
+ u8 oui_3;
+ __be16 trap_num;
+ __be16 toggle_count;
+ __be32 issuer_lid;
+ __be32 reserved;
+ u8 issuer_gid[16];
+ u8 raw_data[64];
+} __packed;
+
+/**
+ * struct opa_vnic_vema_mad_trap - Generic VEMA MAD Trap
+ * @mad_hdr: Generic MAD header
+ * @rmpp_hdr: RMPP header for vendor specific MADs
+ * @reserved: reserved bytes
+ * @oui: Unique org identifier
+ * @notice: Notice structure
+ */
+struct opa_vnic_vema_mad_trap {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 reserved;
+ u8 oui[3];
+ struct opa_vnic_notice_attr notice;
+};
+
+#endif /* _OPA_VNIC_ENCAP_H */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
new file mode 100644
index 000000000..29b3d8fce
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC ethtool functions
+ */
+
+#include <linux/ethtool.h>
+
+#include "opa_vnic_internal.h"
+
+enum {NETDEV_STATS, VNIC_STATS};
+
+struct vnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ struct {
+ int sizeof_stat;
+ int stat_offset;
+ };
+};
+
+#define VNIC_STAT(m) { sizeof_field(struct opa_vnic_stats, m), \
+ offsetof(struct opa_vnic_stats, m) }
+
+static struct vnic_stats vnic_gstrings_stats[] = {
+ /* NETDEV stats */
+ {"rx_packets", VNIC_STAT(netstats.rx_packets)},
+ {"tx_packets", VNIC_STAT(netstats.tx_packets)},
+ {"rx_bytes", VNIC_STAT(netstats.rx_bytes)},
+ {"tx_bytes", VNIC_STAT(netstats.tx_bytes)},
+ {"rx_errors", VNIC_STAT(netstats.rx_errors)},
+ {"tx_errors", VNIC_STAT(netstats.tx_errors)},
+ {"rx_dropped", VNIC_STAT(netstats.rx_dropped)},
+ {"tx_dropped", VNIC_STAT(netstats.tx_dropped)},
+
+ /* SUMMARY counters */
+ {"tx_unicast", VNIC_STAT(tx_grp.unicast)},
+ {"tx_mcastbcast", VNIC_STAT(tx_grp.mcastbcast)},
+ {"tx_untagged", VNIC_STAT(tx_grp.untagged)},
+ {"tx_vlan", VNIC_STAT(tx_grp.vlan)},
+
+ {"tx_64_size", VNIC_STAT(tx_grp.s_64)},
+ {"tx_65_127", VNIC_STAT(tx_grp.s_65_127)},
+ {"tx_128_255", VNIC_STAT(tx_grp.s_128_255)},
+ {"tx_256_511", VNIC_STAT(tx_grp.s_256_511)},
+ {"tx_512_1023", VNIC_STAT(tx_grp.s_512_1023)},
+ {"tx_1024_1518", VNIC_STAT(tx_grp.s_1024_1518)},
+ {"tx_1519_max", VNIC_STAT(tx_grp.s_1519_max)},
+
+ {"rx_unicast", VNIC_STAT(rx_grp.unicast)},
+ {"rx_mcastbcast", VNIC_STAT(rx_grp.mcastbcast)},
+ {"rx_untagged", VNIC_STAT(rx_grp.untagged)},
+ {"rx_vlan", VNIC_STAT(rx_grp.vlan)},
+
+ {"rx_64_size", VNIC_STAT(rx_grp.s_64)},
+ {"rx_65_127", VNIC_STAT(rx_grp.s_65_127)},
+ {"rx_128_255", VNIC_STAT(rx_grp.s_128_255)},
+ {"rx_256_511", VNIC_STAT(rx_grp.s_256_511)},
+ {"rx_512_1023", VNIC_STAT(rx_grp.s_512_1023)},
+ {"rx_1024_1518", VNIC_STAT(rx_grp.s_1024_1518)},
+ {"rx_1519_max", VNIC_STAT(rx_grp.s_1519_max)},
+
+ /* ERROR counters */
+ {"rx_fifo_errors", VNIC_STAT(netstats.rx_fifo_errors)},
+ {"rx_length_errors", VNIC_STAT(netstats.rx_length_errors)},
+
+ {"tx_fifo_errors", VNIC_STAT(netstats.tx_fifo_errors)},
+ {"tx_carrier_errors", VNIC_STAT(netstats.tx_carrier_errors)},
+
+ {"tx_dlid_zero", VNIC_STAT(tx_dlid_zero)},
+ {"tx_drop_state", VNIC_STAT(tx_drop_state)},
+ {"rx_drop_state", VNIC_STAT(rx_drop_state)},
+ {"rx_oversize", VNIC_STAT(rx_oversize)},
+ {"rx_runt", VNIC_STAT(rx_runt)},
+};
+
+#define VNIC_STATS_LEN ARRAY_SIZE(vnic_gstrings_stats)
+
+/* vnic_get_drvinfo - get driver info */
+static void vnic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
+ sizeof(drvinfo->bus_info));
+}
+
+/* vnic_get_sset_count - get string set count */
+static int vnic_get_sset_count(struct net_device *netdev, int sset)
+{
+ return (sset == ETH_SS_STATS) ? VNIC_STATS_LEN : -EOPNOTSUPP;
+}
+
+/* vnic_get_ethtool_stats - get statistics */
+static void vnic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct opa_vnic_stats vstats;
+ int i;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+ for (i = 0; i < VNIC_STATS_LEN; i++) {
+ char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset;
+
+ data[i] = (vnic_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+/* vnic_get_strings - get strings */
+static void vnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < VNIC_STATS_LEN; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ vnic_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+}
+
+/* ethtool ops */
+static const struct ethtool_ops opa_vnic_ethtool_ops = {
+ .get_drvinfo = vnic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = vnic_get_strings,
+ .get_sset_count = vnic_get_sset_count,
+ .get_ethtool_stats = vnic_get_ethtool_stats,
+};
+
+/* opa_vnic_set_ethtool_ops - set ethtool ops */
+void opa_vnic_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &opa_vnic_ethtool_ops;
+}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
new file mode 100644
index 000000000..dd942dd64
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -0,0 +1,329 @@
+#ifndef _OPA_VNIC_INTERNAL_H
+#define _OPA_VNIC_INTERNAL_H
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC driver internal declarations
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/hashtable.h>
+#include <linux/sizes.h>
+#include <rdma/opa_vnic.h>
+
+#include "opa_vnic_encap.h"
+
+#define OPA_VNIC_VLAN_PCP(vlan_tci) \
+ (((vlan_tci) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
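+
+/*
+ * Illustrative sketch (hypothetical variables): recovering the 3-bit
+ * PCP from a VLAN-tagged skb before indexing the pcp_to_sc/pcp_to_vl
+ * tables:
+ *
+ *	u16 vlan_tci;
+ *
+ *	if (!vlan_get_tag(skb, &vlan_tci))
+ *		pcp = OPA_VNIC_VLAN_PCP(vlan_tci);
+ */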
+
+/* Flow to default port redirection table size */
+#define OPA_VNIC_FLOW_TBL_SIZE 32
+
+/* Invalid port number */
+#define OPA_VNIC_INVALID_PORT 0xff
+
+struct opa_vnic_adapter;
+
+/*
+ * struct __opa_vesw_info - OPA vnic virtual switch info
+ *
+ * Same as opa_vesw_info without bitwise attribute.
+ */
+struct __opa_vesw_info {
+ u16 fabric_id;
+ u16 vesw_id;
+
+ u8 rsvd0[6];
+ u16 def_port_mask;
+
+ u8 rsvd1[2];
+ u16 pkey;
+
+ u8 rsvd2[4];
+ u32 u_mcast_dlid;
+ u32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
+
+ u32 rc;
+
+ u8 rsvd3[56];
+ u16 eth_mtu;
+ u8 rsvd4[2];
+} __packed;
+
+/*
+ * struct __opa_per_veswport_info - OPA vnic per port info
+ *
+ * Same as opa_per_veswport_info without bitwise attribute.
+ */
+struct __opa_per_veswport_info {
+ u32 port_num;
+
+ u8 eth_link_status;
+ u8 rsvd0[3];
+
+ u8 base_mac_addr[ETH_ALEN];
+ u8 config_state;
+ u8 oper_state;
+
+ u16 max_mac_tbl_ent;
+ u16 max_smac_ent;
+ u32 mac_tbl_digest;
+ u8 rsvd1[4];
+
+ u32 encap_slid;
+
+ u8 pcp_to_sc_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_uc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_sc_mc[OPA_VNIC_MAX_NUM_PCP];
+ u8 pcp_to_vl_mc[OPA_VNIC_MAX_NUM_PCP];
+
+ u8 non_vlan_sc_uc;
+ u8 non_vlan_vl_uc;
+ u8 non_vlan_sc_mc;
+ u8 non_vlan_vl_mc;
+
+ u8 rsvd2[48];
+
+ u16 uc_macs_gen_count;
+ u16 mc_macs_gen_count;
+
+ u8 rsvd3[8];
+} __packed;
+
+/*
+ * struct __opa_veswport_info - OPA vnic port info
+ *
+ * Same as opa_veswport_info without bitwise attribute.
+ */
+struct __opa_veswport_info {
+ struct __opa_vesw_info vesw;
+ struct __opa_per_veswport_info vport;
+};
+
+/*
+ * struct __opa_veswport_trap - OPA vnic trap info
+ *
+ * Same as opa_veswport_trap without bitwise attribute.
+ */
+struct __opa_veswport_trap {
+ u16 fabric_id;
+ u16 veswid;
+ u32 veswportnum;
+ u16 opaportnum;
+ u8 veswportindex;
+ u8 opcode;
+ u32 reserved;
+} __packed;
+
+/**
+ * struct opa_vnic_ctrl_port - OPA virtual NIC control port
+ * @ibdev: pointer to ib device
+ * @ops: opa vnic control operations
+ * @num_ports: number of opa ports
+ */
+struct opa_vnic_ctrl_port {
+ struct ib_device *ibdev;
+ struct opa_vnic_ctrl_ops *ops;
+ u8 num_ports;
+};
+
+/**
+ * struct opa_vnic_adapter - OPA VNIC netdev private data structure
+ * @netdev: pointer to associated netdev
+ * @ibdev: ib device
+ * @cport: pointer to opa vnic control port
+ * @rn_ops: rdma netdev's net_device_ops
+ * @port_num: OPA port number
+ * @vport_num: vesw port number
+ * @lock: adapter lock
+ * @info: virtual ethernet switch port information
+ * @vema_mac_addr: mac address configured by vema
+ * @umac_hash: unicast maclist hash
+ * @mmac_hash: multicast maclist hash
+ * @mactbl: hash table of MAC entries
+ * @mactbl_lock: mac table lock
+ * @stats_lock: statistics lock
+ * @flow_tbl: flow to default port redirection table
+ * @trap_timeout: trap timeout
+ * @trap_count: no. of traps allowed within timeout period
+ */
+struct opa_vnic_adapter {
+ struct net_device *netdev;
+ struct ib_device *ibdev;
+ struct opa_vnic_ctrl_port *cport;
+ const struct net_device_ops *rn_ops;
+
+ u8 port_num;
+ u8 vport_num;
+
+ /* Lock used around concurrent updates to netdev */
+ struct mutex lock;
+
+ struct __opa_veswport_info info;
+ u8 vema_mac_addr[ETH_ALEN];
+ u32 umac_hash;
+ u32 mmac_hash;
+ struct hlist_head __rcu *mactbl;
+
+ /* Lock used to protect updates to mac table */
+ struct mutex mactbl_lock;
+
+ /* Lock used to protect access to vnic counters */
+ spinlock_t stats_lock;
+
+ u8 flow_tbl[OPA_VNIC_FLOW_TBL_SIZE];
+
+ unsigned long trap_timeout;
+ u8 trap_count;
+};
+
+/* Same as opa_veswport_mactable_entry, but without bitwise attribute */
+struct __opa_vnic_mactable_entry {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+ u32 dlid_sd;
+} __packed;
+
+/**
+ * struct opa_vnic_mac_tbl_node - OPA VNIC mac table node
+ * @hlist: hash list handle
+ * @index: index of entry in the mac table
+ * @entry: entry in the table
+ */
+struct opa_vnic_mac_tbl_node {
+ struct hlist_node hlist;
+ u16 index;
+ struct __opa_vnic_mactable_entry entry;
+};
+
+#define v_dbg(format, arg...) \
+ netdev_dbg(adapter->netdev, format, ## arg)
+#define v_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define v_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define v_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+
+#define c_err(format, arg...) \
+ dev_err(&cport->ibdev->dev, format, ## arg)
+#define c_info(format, arg...) \
+ dev_info(&cport->ibdev->dev, format, ## arg)
+#define c_dbg(format, arg...) \
+ dev_dbg(&cport->ibdev->dev, format, ## arg)
+
+/* The maximum allowed entries in the mac table */
+#define OPA_VNIC_MAC_TBL_MAX_ENTRIES 2048
+/* Limit of smac entries in mac table */
+#define OPA_VNIC_MAX_SMAC_LIMIT 256
+
+/* The last octet of the MAC address is used as the key to the hash table */
+#define OPA_VNIC_MAC_HASH_IDX 5
+
+/* The VNIC MAC hash table is of size 2^8 */
+#define OPA_VNIC_MAC_TBL_HASH_BITS 8
+#define OPA_VNIC_MAC_TBL_SIZE BIT(OPA_VNIC_MAC_TBL_HASH_BITS)
+
+/* VNIC HASH MACROS */
+#define vnic_hash_init(hashtable) __hash_init(hashtable, OPA_VNIC_MAC_TBL_SIZE)
+
+#define vnic_hash_add(hashtable, node, key) \
+ hlist_add_head(node, \
+ &hashtable[hash_min(key, ilog2(OPA_VNIC_MAC_TBL_SIZE))])
+
+#define vnic_hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; \
+ !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+#define vnic_hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, \
+ &name[hash_min(key, ilog2(OPA_VNIC_MAC_TBL_SIZE))], member)
+
+#define vnic_hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; \
+ !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
+ hlist_for_each_entry(obj, &name[bkt], member)
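+
+/*
+ * Illustrative sketch: looking up a mac table node by its hash key,
+ * which is the last octet of the MAC address (OPA_VNIC_MAC_HASH_IDX);
+ * the variable names are hypothetical.
+ *
+ *	struct opa_vnic_mac_tbl_node *node;
+ *	u8 key = mac_addr[OPA_VNIC_MAC_HASH_IDX];
+ *
+ *	vnic_hash_for_each_possible(mactbl, node, hlist, key)
+ *		if (ether_addr_equal(node->entry.mac_addr, mac_addr))
+ *			break;
+ */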
+
+extern char opa_vnic_driver_name[];
+
+struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
+ u8 port_num, u8 vport_num);
+void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter);
+void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
+u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
+u8 opa_vnic_calc_entropy(struct sk_buff *skb);
+void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter);
+void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter);
+void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl);
+int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_mactable *tbl);
+void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs);
+void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs);
+void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_summary_counters *cntrs);
+void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_error_counters *cntrs);
+void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info);
+void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info);
+void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info);
+void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info);
+void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event);
+void opa_vnic_set_ethtool_ops(struct net_device *netdev);
+void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
+ struct __opa_veswport_trap *data, u32 lid);
+
+#endif /* _OPA_VNIC_INTERNAL_H */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
new file mode 100644
index 000000000..071f35711
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA Virtual Network Interface Controller (VNIC) driver
+ * netdev functionality.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+
+#include "opa_vnic_internal.h"
+
+#define OPA_TX_TIMEOUT_MS 1000
+
+#define OPA_VNIC_SKB_HEADROOM \
+ ALIGN((OPA_VNIC_HDR_LEN + OPA_VNIC_SKB_MDATA_LEN), 8)
+
+/*
+ * This function overrides the rdma netdev's ndo_get_stats64 with an
+ * opa_vnic specific implementation.
+ */
+static void opa_vnic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct opa_vnic_stats vstats;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+ memcpy(stats, &vstats.netstats, sizeof(*stats));
+}
+
+/* opa_netdev_start_xmit - transmit function */
+static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+
+ v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len);
+	/* pad to ensure minimum ethernet packet length */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
+ opa_vnic_encap_skb(adapter, skb);
+ return adapter->rn_ops->ndo_start_xmit(skb, netdev);
+}
+
+static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct opa_vnic_skb_mdata *mdata;
+ int rc;
+
+ /* pass entropy and vl as metadata in skb */
+ mdata = skb_push(skb, sizeof(*mdata));
+ mdata->entropy = opa_vnic_calc_entropy(skb);
+ mdata->vl = opa_vnic_get_vl(adapter, skb);
+ rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
+ skb_pull(skb, sizeof(*mdata));
+ return rc;
+}
+
+static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+
+ mutex_lock(&adapter->lock);
+ /* Operational state can only be DROP_ALL or FORWARDING */
+ if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) {
+ info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ } else {
+ info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ }
+
+ if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING)
+ netif_dormant_off(adapter->netdev);
+ else
+ netif_dormant_on(adapter->netdev);
+ mutex_unlock(&adapter->lock);
+}
+
+/* opa_vnic_process_vema_config - process vema configuration updates */
+void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct rdma_netdev *rn = netdev_priv(adapter->netdev);
+ u8 port_num[OPA_VESW_MAX_NUM_DEF_PORT] = { 0 };
+ struct net_device *netdev = adapter->netdev;
+ u8 i, port_count = 0;
+ u16 port_mask;
+
+ /* If the base_mac_addr is changed, update the interface mac address */
+ if (memcmp(info->vport.base_mac_addr, adapter->vema_mac_addr,
+ ARRAY_SIZE(info->vport.base_mac_addr))) {
+ struct sockaddr saddr;
+
+ memcpy(saddr.sa_data, info->vport.base_mac_addr,
+ ARRAY_SIZE(info->vport.base_mac_addr));
+ mutex_lock(&adapter->lock);
+ eth_commit_mac_addr_change(netdev, &saddr);
+ memcpy(adapter->vema_mac_addr,
+ info->vport.base_mac_addr, ETH_ALEN);
+ mutex_unlock(&adapter->lock);
+ }
+
+ rn->set_id(netdev, info->vesw.vesw_id);
+
+ /* Handle MTU limit change */
+ rtnl_lock();
+ netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu,
+ netdev->min_mtu);
+ if (netdev->mtu > netdev->max_mtu)
+ dev_set_mtu(netdev, netdev->max_mtu);
+ rtnl_unlock();
+
+ /* Update flow to default port redirection table */
+ port_mask = info->vesw.def_port_mask;
+ for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) {
+ if (port_mask & 1)
+ port_num[port_count++] = i;
+ port_mask >>= 1;
+ }
+
+ /*
+ * Build the flow table. Flow table is required when destination LID
+ * is not available. Up to OPA_VNIC_FLOW_TBL_SIZE flows supported.
+ * Each flow need a default port number to get its dlid from the
+ * u_ucast_dlid array.
+ */
+ for (i = 0; i < OPA_VNIC_FLOW_TBL_SIZE; i++)
+ adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
+ OPA_VNIC_INVALID_PORT;
+
+ /* update state */
+ opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP));
+}
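+
+/*
+ * Illustrative sketch (not the driver's exact lookup): once built, the
+ * flow table maps a packet's entropy to a default port, whose dlid can
+ * then be taken from the u_ucast_dlid array, roughly:
+ *
+ *	u8 def_port = adapter->flow_tbl[entropy & (OPA_VNIC_FLOW_TBL_SIZE - 1)];
+ *
+ *	if (def_port != OPA_VNIC_INVALID_PORT)
+ *		dlid = adapter->info.vesw.u_ucast_dlid[def_port];
+ */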
+
+/*
+ * Set the power on default values in adapter's vema interface structure.
+ */
+static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter)
+{
+ adapter->info.vport.max_mac_tbl_ent = OPA_VNIC_MAC_TBL_MAX_ENTRIES;
+ adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT;
+ adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ adapter->info.vesw.eth_mtu = ETH_DATA_LEN;
+}
+
+/* opa_vnic_set_mac_addr - change mac address */
+static int opa_vnic_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct sockaddr *sa = addr;
+ int rc;
+
+ if (!memcmp(netdev->dev_addr, sa->sa_data, ETH_ALEN))
+ return 0;
+
+ mutex_lock(&adapter->lock);
+ rc = eth_mac_addr(netdev, addr);
+ mutex_unlock(&adapter->lock);
+ if (rc)
+ return rc;
+
+ adapter->info.vport.uc_macs_gen_count++;
+ opa_vnic_vema_report_event(adapter,
+ OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE);
+ return 0;
+}
+
+/*
+ * opa_vnic_mac_send_event - post event on possible mac list change
+ *  Send a trap when the digest of the uc/mc mac list differs from the
+ *  previous run. The digest is computed similarly to a checksum.
+ */
+static void opa_vnic_mac_send_event(struct net_device *netdev, u8 event)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ struct netdev_hw_addr *ha;
+ struct netdev_hw_addr_list *hw_list;
+ u32 *ref_crc;
+ u32 l, crc = 0;
+
+ switch (event) {
+ case OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE:
+ hw_list = &netdev->uc;
+ adapter->info.vport.uc_macs_gen_count++;
+ ref_crc = &adapter->umac_hash;
+ break;
+ case OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE:
+ hw_list = &netdev->mc;
+ adapter->info.vport.mc_macs_gen_count++;
+ ref_crc = &adapter->mmac_hash;
+ break;
+ default:
+ return;
+ }
+ netdev_hw_addr_list_for_each(ha, hw_list) {
+ crc = crc32_le(crc, ha->addr, ETH_ALEN);
+ }
+ l = netdev_hw_addr_list_count(hw_list) * ETH_ALEN;
+ crc = ~crc32_le(crc, (void *)&l, sizeof(l));
+
+ if (crc != *ref_crc) {
+ *ref_crc = crc;
+ opa_vnic_vema_report_event(adapter, event);
+ }
+}
+
+/* opa_vnic_set_rx_mode - handle uc/mc mac list change */
+static void opa_vnic_set_rx_mode(struct net_device *netdev)
+{
+ opa_vnic_mac_send_event(netdev,
+ OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE);
+
+ opa_vnic_mac_send_event(netdev,
+ OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE);
+}
+
+/* opa_netdev_open - activate network interface */
+static int opa_netdev_open(struct net_device *netdev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ int rc;
+
+ rc = adapter->rn_ops->ndo_open(adapter->netdev);
+ if (rc) {
+ v_dbg("open failed %d\n", rc);
+ return rc;
+ }
+
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, true);
+ opa_vnic_vema_report_event(adapter,
+ OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
+ return 0;
+}
+
+/* opa_netdev_close - disable network interface */
+static int opa_netdev_close(struct net_device *netdev)
+{
+ struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
+ int rc;
+
+ rc = adapter->rn_ops->ndo_stop(adapter->netdev);
+ if (rc) {
+ v_dbg("close failed %d\n", rc);
+ return rc;
+ }
+
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, false);
+ opa_vnic_vema_report_event(adapter,
+ OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
+ return 0;
+}
+
+/* netdev ops */
+static const struct net_device_ops opa_netdev_ops = {
+ .ndo_open = opa_netdev_open,
+ .ndo_stop = opa_netdev_close,
+ .ndo_start_xmit = opa_netdev_start_xmit,
+ .ndo_get_stats64 = opa_vnic_get_stats64,
+ .ndo_set_rx_mode = opa_vnic_set_rx_mode,
+ .ndo_select_queue = opa_vnic_select_queue,
+ .ndo_set_mac_address = opa_vnic_set_mac_addr,
+};
+
+/* opa_vnic_add_netdev - create vnic netdev interface */
+struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
+ u8 port_num, u8 vport_num)
+{
+ struct opa_vnic_adapter *adapter;
+ struct net_device *netdev;
+ struct rdma_netdev *rn;
+ int rc;
+
+ netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
+ RDMA_NETDEV_OPA_VNIC,
+ "veth%d", NET_NAME_UNKNOWN,
+ ether_setup);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+ else if (IS_ERR(netdev))
+ return ERR_CAST(netdev);
+
+ rn = netdev_priv(netdev);
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ rc = -ENOMEM;
+ goto adapter_err;
+ }
+
+ rn->clnt_priv = adapter;
+ rn->hca = ibdev;
+ rn->port_num = port_num;
+ adapter->netdev = netdev;
+ adapter->ibdev = ibdev;
+ adapter->port_num = port_num;
+ adapter->vport_num = vport_num;
+ adapter->rn_ops = netdev->netdev_ops;
+
+ netdev->netdev_ops = &opa_netdev_ops;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM;
+ mutex_init(&adapter->lock);
+ mutex_init(&adapter->mactbl_lock);
+ spin_lock_init(&adapter->stats_lock);
+
+ SET_NETDEV_DEV(netdev, ibdev->dev.parent);
+
+ opa_vnic_set_ethtool_ops(netdev);
+
+ opa_vnic_set_pod_values(adapter);
+
+ rc = register_netdev(netdev);
+ if (rc)
+ goto netdev_err;
+
+ netif_carrier_off(netdev);
+ netif_dormant_on(netdev);
+ v_info("initialized\n");
+
+ return adapter;
+netdev_err:
+ mutex_destroy(&adapter->lock);
+ mutex_destroy(&adapter->mactbl_lock);
+ kfree(adapter);
+adapter_err:
+ rn->free_rdma_netdev(netdev);
+
+ return ERR_PTR(rc);
+}
+
+/* opa_vnic_rem_netdev - remove vnic netdev interface */
+void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct rdma_netdev *rn = netdev_priv(netdev);
+
+ v_info("removing\n");
+ unregister_netdev(netdev);
+ opa_vnic_release_mac_tbl(adapter);
+ mutex_destroy(&adapter->lock);
+ mutex_destroy(&adapter->mactbl_lock);
+ kfree(adapter);
+ rn->free_rdma_netdev(netdev);
+}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
new file mode 100644
index 000000000..21c6cea8b
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -0,0 +1,1056 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains the OPA Virtual Network Interface Controller (VNIC)
+ * Ethernet Management Agent (EMA) driver.
+ */
+
+#include <linux/module.h>
+#include <linux/xarray.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/opa_smi.h>
+#include <rdma/opa_port_info.h>
+
+#include "opa_vnic_internal.h"
+
+char opa_vnic_driver_name[] = "opa_vnic";
+
+/*
+ * The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd
+ * field in the class port info MAD.
+ */
+#define GET_TRAP_SL_FROM_CLASS_PORT_INFO(x) (((x) >> 3) & 0x1f)
+
+/* Cap trap bursts to a reasonable limit good for normal cases */
+#define OPA_VNIC_TRAP_BURST_LIMIT 4
+
+/*
+ * VNIC trap rate-limit timeout.
+ * Derived from the cap_mask2 response timeout of
+ * 4.096 usec * 2^18 (~1.0737 secs); see IB spec
+ * 13.4.6.2.1 PortInfoSubnetTimeout and 13.4.9 Traps.
+#define OPA_VNIC_TRAP_TIMEOUT ((4096 * (1UL << 18)) / 1000)
+
+#define OPA_VNIC_UNSUP_ATTR \
+ cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
+
+#define OPA_VNIC_INVAL_ATTR \
+ cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
+
+#define OPA_VNIC_CLASS_CAP_TRAP 0x1
+
+/* Maximum number of VNIC ports supported */
+#define OPA_VNIC_MAX_NUM_VPORT 255
+
+/**
+ * struct opa_vnic_vema_port -- VNIC VEMA port details
+ * @cport: pointer to vnic control port
+ * @mad_agent: pointer to mad agent for port
+ * @class_port_info: Class port info information.
+ * @tid: Transaction id
+ * @port_num: OPA port number
+ * @vports: vnic ports
+ * @event_handler: ib event handler
+ * @lock: adapter interface lock
+ */
+struct opa_vnic_vema_port {
+ struct opa_vnic_ctrl_port *cport;
+ struct ib_mad_agent *mad_agent;
+ struct opa_class_port_info class_port_info;
+ u64 tid;
+ u8 port_num;
+ struct xarray vports;
+ struct ib_event_handler event_handler;
+
+ /* Lock to query/update network adapter */
+ struct mutex lock;
+};
+
+static int opa_vnic_vema_add_one(struct ib_device *device);
+static void opa_vnic_vema_rem_one(struct ib_device *device,
+ void *client_data);
+
+static struct ib_client opa_vnic_client = {
+ .name = opa_vnic_driver_name,
+ .add = opa_vnic_vema_add_one,
+ .remove = opa_vnic_vema_rem_one,
+};
+
+/**
+ * vema_get_vport_num -- Get the vnic port number from the mad
+ * @recvd_mad: Received mad
+ *
+ * Return: the vnic port number
+ */
+static inline u8 vema_get_vport_num(struct opa_vnic_vema_mad *recvd_mad)
+{
+ return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff;
+}
+
+/**
+ * vema_get_vport_adapter -- Get vnic port adapter from recvd mad
+ * @recvd_mad: received mad
+ * @port: ptr to port struct on which MAD was recvd
+ *
+ * Return: vnic adapter
+ */
+static inline struct opa_vnic_adapter *
+vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_port *port)
+{
+ u8 vport_num = vema_get_vport_num(recvd_mad);
+
+ return xa_load(&port->vports, vport_num);
+}
+
+/**
+ * vema_mac_tbl_req_ok -- Check if mac request has correct values
+ * @mac_tbl: mac table
+ *
+ * This function checks for the validity of the offset and number of
+ * entries required.
+ *
+ * Return: true if offset and num_entries are valid
+ */
+static inline bool vema_mac_tbl_req_ok(struct opa_veswport_mactable *mac_tbl)
+{
+ u16 offset, num_entries;
+ u16 req_entries = ((OPA_VNIC_EMA_DATA - sizeof(*mac_tbl)) /
+ sizeof(mac_tbl->tbl_entries[0]));
+
+ offset = be16_to_cpu(mac_tbl->offset);
+ num_entries = be16_to_cpu(mac_tbl->num_entries);
+
+ return ((num_entries <= req_entries) &&
+ (offset + num_entries <= OPA_VNIC_MAC_TBL_MAX_ENTRIES));
+}
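+
+/*
+ * Worked example (assuming 2048-byte OPA MADs and a 40-byte vendor
+ * header): each opa_veswport_mactable_entry is 16 bytes (6 + 6 + 4),
+ * so req_entries above evaluates to (2048 - 40 - 8) / 16 = 125, and a
+ * request for more entries, or one that runs past
+ * OPA_VNIC_MAC_TBL_MAX_ENTRIES, is rejected.
+ */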
+
+/*
+ * Return the power on default values in the port info structure
+ * in big endian format as required by MAD.
+ */
+static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
+{
+ memset(port_info, 0, sizeof(*port_info));
+ port_info->vport.max_mac_tbl_ent =
+ cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES);
+ port_info->vport.max_smac_ent =
+ cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
+ port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
+}
+
+/**
+ * vema_add_vport -- Add a new vnic port
+ * @port: ptr to opa_vnic_vema_port struct
+ * @vport_num: vnic port number (to be added)
+ *
+ * Return: a pointer to the vnic adapter structure on success, or an
+ * ERR_PTR on failure
+ */
+static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port,
+ u8 vport_num)
+{
+ struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = opa_vnic_add_netdev(cport->ibdev, port->port_num, vport_num);
+ if (!IS_ERR(adapter)) {
+ int rc;
+
+ adapter->cport = cport;
+ rc = xa_insert(&port->vports, vport_num, adapter, GFP_KERNEL);
+ if (rc < 0) {
+ opa_vnic_rem_netdev(adapter);
+ adapter = ERR_PTR(rc);
+ }
+ }
+
+ return adapter;
+}
+
+/**
+ * vema_get_class_port_info -- Get class info for port
+ * @port: Port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function copies the latest class port info value set for the
+ * port and stores it for generating traps
+ */
+static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_class_port_info *port_info;
+
+ port_info = (struct opa_class_port_info *)rsp_mad->data;
+ memcpy(port_info, &port->class_port_info, sizeof(*port_info));
+ port_info->base_version = OPA_MGMT_BASE_VERSION;
+ port_info->class_version = OPA_EMA_CLASS_VERSION;
+
+ /*
+ * Set capability mask bit indicating agent generates traps,
+ * and set the maximum number of VNIC ports supported.
+ */
+ port_info->cap_mask = cpu_to_be16((OPA_VNIC_CLASS_CAP_TRAP |
+ (OPA_VNIC_MAX_NUM_VPORT << 8)));
+
+ /*
+	 * Since a get routine is always sent by the EM first, we
+ * set the expected response time to
+ * 4.096 usec * 2^18 == 1.0737 sec here.
+ */
+ port_info->cap_mask2_resp_time = cpu_to_be32(18);
+}
+
+/**
+ * vema_set_class_port_info -- Set class info for port
+ * @port: Port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function updates the port class info for the specific vnic
+ * and sets up the response mad data
+ */
+static void vema_set_class_port_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ memcpy(&port->class_port_info, recvd_mad->data,
+ sizeof(port->class_port_info));
+
+ vema_get_class_port_info(port, recvd_mad, rsp_mad);
+}
+
+/**
+ * vema_get_veswport_info -- Get veswport info
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ */
+static void vema_get_veswport_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_info *port_info =
+ (struct opa_veswport_info *)rsp_mad->data;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (adapter) {
+ memset(port_info, 0, sizeof(*port_info));
+ opa_vnic_get_vesw_info(adapter, &port_info->vesw);
+ opa_vnic_get_per_veswport_info(adapter,
+ &port_info->vport);
+ } else {
+ vema_get_pod_values(port_info);
+ }
+}
+
+/**
+ * vema_set_veswport_info -- Set veswport info
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function sets the veswport info for the vnic, adding a new
+ * vnic port if one does not exist yet
+ */
+static void vema_set_veswport_info(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_veswport_info *port_info;
+ struct opa_vnic_adapter *adapter;
+ u8 vport_num;
+
+ vport_num = vema_get_vport_num(recvd_mad);
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ adapter = vema_add_vport(port, vport_num);
+ if (IS_ERR(adapter)) {
+ c_err("failed to add vport %d: %ld\n",
+ vport_num, PTR_ERR(adapter));
+ goto err_exit;
+ }
+ }
+
+ port_info = (struct opa_veswport_info *)recvd_mad->data;
+ opa_vnic_set_vesw_info(adapter, &port_info->vesw);
+ opa_vnic_set_per_veswport_info(adapter, &port_info->vport);
+
+ /* Process the new config settings */
+ opa_vnic_process_vema_config(adapter);
+
+ vema_get_veswport_info(port, recvd_mad, rsp_mad);
+ return;
+
+err_exit:
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+}
+
+/**
+ * vema_get_mac_entries -- Get MAC entries in VNIC MAC table
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function gets the MAC entries that are programmed into
+ * the VNIC MAC forwarding table. It checks for the validity of
+ * the index into the MAC table and the number of entries that
+ * are to be retrieved.
+ */
+static void vema_get_mac_entries(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_mactable *mac_tbl_in, *mac_tbl_out;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ mac_tbl_in = (struct opa_veswport_mactable *)recvd_mad->data;
+ mac_tbl_out = (struct opa_veswport_mactable *)rsp_mad->data;
+
+ if (vema_mac_tbl_req_ok(mac_tbl_in)) {
+ mac_tbl_out->offset = mac_tbl_in->offset;
+ mac_tbl_out->num_entries = mac_tbl_in->num_entries;
+ opa_vnic_query_mac_tbl(adapter, mac_tbl_out);
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ }
+}
+
+/**
+ * vema_set_mac_entries -- Set MAC entries in VNIC MAC table
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function sets the MAC entries in the VNIC forwarding table.
+ * It checks for the validity of the index and the number of forwarding
+ * table entries to be programmed.
+ */
+static void vema_set_mac_entries(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_mactable *mac_tbl;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ mac_tbl = (struct opa_veswport_mactable *)recvd_mad->data;
+ if (vema_mac_tbl_req_ok(mac_tbl)) {
+ if (opa_vnic_update_mac_tbl(adapter, mac_tbl))
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ }
+ vema_get_mac_entries(port, recvd_mad, rsp_mad);
+}
+
+/**
+ * vema_set_delete_vesw -- Reset VESW info to POD values
+ * @port: source port on which MAD was received
+ * @recvd_mad: pointer to the received mad
+ * @rsp_mad: pointer to response mad
+ *
+ * This function clears all the fields of veswport info for the requested vesw
+ * and sets them back to the power-on default values. It does not delete the
+ * vesw.
+ */
+static void vema_set_delete_vesw(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_info *port_info =
+ (struct opa_veswport_info *)rsp_mad->data;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ vema_get_pod_values(port_info);
+ opa_vnic_set_vesw_info(adapter, &port_info->vesw);
+ opa_vnic_set_per_veswport_info(adapter, &port_info->vport);
+
+ /* Process the new config settings */
+ opa_vnic_process_vema_config(adapter);
+
+ opa_vnic_release_mac_tbl(adapter);
+
+ vema_get_veswport_info(port, recvd_mad, rsp_mad);
+}
+
+/**
+ * vema_get_mac_list -- Get the unicast/multicast macs.
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ * @attr_id: Attribute ID indicating multicast or unicast mac list
+ */
+static void vema_get_mac_list(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad,
+ u16 attr_id)
+{
+ struct opa_veswport_iface_macs *macs_in, *macs_out;
+ int max_entries = (OPA_VNIC_EMA_DATA - sizeof(*macs_out)) / ETH_ALEN;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (!adapter) {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ return;
+ }
+
+ macs_in = (struct opa_veswport_iface_macs *)recvd_mad->data;
+ macs_out = (struct opa_veswport_iface_macs *)rsp_mad->data;
+
+ macs_out->start_idx = macs_in->start_idx;
+ if (macs_in->num_macs_in_msg)
+ macs_out->num_macs_in_msg = macs_in->num_macs_in_msg;
+ else
+ macs_out->num_macs_in_msg = cpu_to_be16(max_entries);
+
+ if (attr_id == OPA_EM_ATTR_IFACE_MCAST_MACS)
+ opa_vnic_query_mcast_macs(adapter, macs_out);
+ else
+ opa_vnic_query_ucast_macs(adapter, macs_out);
+}
+
+/**
+ * vema_get_summary_counters -- Gets summary counters.
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_get_summary_counters(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_summary_counters *cntrs;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (adapter) {
+ cntrs = (struct opa_veswport_summary_counters *)rsp_mad->data;
+ opa_vnic_get_summary_counters(adapter, cntrs);
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ }
+}
+
+/**
+ * vema_get_error_counters -- Gets error counters.
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_get_error_counters(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ struct opa_veswport_error_counters *cntrs;
+ struct opa_vnic_adapter *adapter;
+
+ adapter = vema_get_vport_adapter(recvd_mad, port);
+ if (adapter) {
+ cntrs = (struct opa_veswport_error_counters *)rsp_mad->data;
+ opa_vnic_get_error_counters(adapter, cntrs);
+ } else {
+ rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
+ }
+}
+
+/**
+ * vema_get -- Process received get MAD
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_get(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);
+
+ switch (attr_id) {
+ case OPA_EM_ATTR_CLASS_PORT_INFO:
+ vema_get_class_port_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_INFO:
+ vema_get_veswport_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
+ vema_get_mac_entries(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_IFACE_UCAST_MACS:
+ case OPA_EM_ATTR_IFACE_MCAST_MACS:
+ vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
+ break;
+ case OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS:
+ vema_get_summary_counters(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS:
+ vema_get_error_counters(port, recvd_mad, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ break;
+ }
+}
+
+/**
+ * vema_set -- Process received set MAD
+ * @port: source port on which MAD was received
+ * @recvd_mad: Received mad contains fields to set vnic parameters
+ * @rsp_mad: Response mad to be built
+ */
+static void vema_set(struct opa_vnic_vema_port *port,
+ struct opa_vnic_vema_mad *recvd_mad,
+ struct opa_vnic_vema_mad *rsp_mad)
+{
+ u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);
+
+ switch (attr_id) {
+ case OPA_EM_ATTR_CLASS_PORT_INFO:
+ vema_set_class_port_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_INFO:
+ vema_set_veswport_info(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
+ vema_set_mac_entries(port, recvd_mad, rsp_mad);
+ break;
+ case OPA_EM_ATTR_DELETE_VESW:
+ vema_set_delete_vesw(port, recvd_mad, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ break;
+ }
+}
+
+/**
+ * vema_send -- Send handler for VEMA MAD agent
+ * @mad_agent: pointer to the mad agent
+ * @mad_wc: pointer to mad send work completion information
+ *
+ * Free all the data structures associated with the sent MAD
+ */
+static void vema_send(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_wc)
+{
+ rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
+ ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * vema_recv -- Recv handler for VEMA MAD agent
+ * @mad_agent: pointer to the mad agent
+ * @send_buf: Send buffer if found, else NULL
+ * @mad_wc: pointer to mad send work completion information
+ *
+ * Handle only set and get methods and respond to other methods
+ * as unsupported. Allocate response buffer and address handle
+ * for the response MAD.
+ */
+static void vema_recv(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_buf *send_buf,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct opa_vnic_vema_port *port;
+ struct ib_ah *ah;
+ struct ib_mad_send_buf *rsp;
+ struct opa_vnic_vema_mad *vema_mad;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad)
+ return;
+
+ port = mad_agent->context;
+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+ mad_wc->recv_buf.grh, mad_agent->port_num);
+ if (IS_ERR(ah))
+ goto free_recv_mad;
+
+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+ mad_wc->wc->pkey_index, 0,
+ IB_MGMT_VENDOR_HDR, OPA_VNIC_EMA_DATA,
+ GFP_KERNEL, OPA_MGMT_BASE_VERSION);
+ if (IS_ERR(rsp))
+ goto err_rsp;
+
+ rsp->ah = ah;
+ vema_mad = rsp->mad;
+ memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR);
+ vema_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+ vema_mad->mad_hdr.status = 0;
+
+ /* Lock ensures network adapter is not removed */
+ mutex_lock(&port->lock);
+
+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
+ vema_mad);
+ break;
+ case IB_MGMT_METHOD_SET:
+ vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
+ vema_mad);
+ break;
+ default:
+ vema_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
+ break;
+ }
+ mutex_unlock(&port->lock);
+
+ if (!ib_post_send_mad(rsp, NULL)) {
+		/*
+		 * With a successful post send, the ah and send mad
+		 * will be destroyed in the send handler.
+		 */
+ goto free_recv_mad;
+ }
+
+ ib_free_send_mad(rsp);
+
+err_rsp:
+ rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
+free_recv_mad:
+ ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * vema_get_port -- Gets the opa_vnic_vema_port
+ * @cport: pointer to control dev
+ * @port_num: Port number
+ *
+ * This function returns the opa_vnic_vema_port structure
+ * associated with the given OPA port number; the per-port
+ * structures are laid out contiguously after the control port.
+ *
+ * Return: ptr to requested opa_vnic_vema_port structure
+ * on success, NULL if not
+ */
+static struct opa_vnic_vema_port *
+vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
+{
+ struct opa_vnic_vema_port *port = (void *)cport + sizeof(*cport);
+
+ if (port_num > cport->num_ports)
+ return NULL;
+
+ return port + (port_num - 1);
+}
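+
+/*
+ * Illustrative sketch only (not driver code): vema_get_port() relies on
+ * the control port header and the per-port array sharing one allocation,
+ * as set up in opa_vnic_vema_add_one() below. A minimal userspace
+ * demonstration of the same layout arithmetic, with hypothetical
+ * stand-in types (ctrl_hdr, port_entry):
+ */
+#include <stdio.h>
+#include <stdlib.h>
+
+struct ctrl_hdr   { int num_ports; };	/* stands in for opa_vnic_ctrl_port */
+struct port_entry { int port_num; };	/* stands in for opa_vnic_vema_port */
+
+/* Same arithmetic as vema_get_port(): entries follow the header. */
+static struct port_entry *get_port(struct ctrl_hdr *hdr, int port_num)
+{
+	struct port_entry *ports =
+		(struct port_entry *)((char *)hdr + sizeof(*hdr));
+
+	if (port_num > hdr->num_ports)
+		return NULL;
+	return ports + (port_num - 1);
+}
+
+int main(void)
+{
+	int n = 2;
+	struct ctrl_hdr *hdr = calloc(1, sizeof(*hdr) +
+				      n * sizeof(struct port_entry));
+
+	if (!hdr)
+		return 1;
+	hdr->num_ports = n;
+	get_port(hdr, 2)->port_num = 2;
+	printf("port 2 -> %d\n", get_port(hdr, 2)->port_num);
+	free(hdr);
+	return 0;
+}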
+
+/**
+ * opa_vnic_vema_send_trap -- This function sends a trap to the EM
+ * @adapter: pointer to vnic adapter
+ * @data: pointer to trap data filled by calling function
+ * @lid: issuer's lid (encap_slid from vesw_port_info)
+ *
+ * This function is called from the VNIC driver to send a trap if there
+ * is something the EM should be notified about. These events currently
+ * are
+ * 1) UNICAST INTERFACE MACADDRESS changes
+ * 2) MULTICAST INTERFACE MACADDRESS changes
+ * 3) ETHERNET LINK STATUS changes
+ * While allocating the send mad, the remote-side QPN used is 1,
+ * as this is the well-known QP.
+ *
+ */
+void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
+ struct __opa_veswport_trap *data, u32 lid)
+{
+ struct opa_vnic_ctrl_port *cport = adapter->cport;
+ struct ib_mad_send_buf *send_buf;
+ struct opa_vnic_vema_port *port;
+ struct ib_device *ibp;
+ struct opa_vnic_vema_mad_trap *trap_mad;
+ struct opa_class_port_info *class;
+ struct rdma_ah_attr ah_attr;
+ struct ib_ah *ah;
+ struct opa_veswport_trap *trap;
+ u32 trap_lid;
+ u16 pkey_idx;
+
+ if (!cport)
+ goto err_exit;
+ ibp = cport->ibdev;
+ port = vema_get_port(cport, data->opaportnum);
+ if (!port || !port->mad_agent)
+ goto err_exit;
+
+ if (time_before(jiffies, adapter->trap_timeout)) {
+ if (adapter->trap_count == OPA_VNIC_TRAP_BURST_LIMIT) {
+ v_warn("Trap rate exceeded\n");
+ goto err_exit;
+ } else {
+ adapter->trap_count++;
+ }
+ } else {
+ adapter->trap_count = 0;
+ }
+
+ class = &port->class_port_info;
+ /* Set up address handle */
+ memset(&ah_attr, 0, sizeof(ah_attr));
+ ah_attr.type = rdma_ah_find_type(ibp, port->port_num);
+ rdma_ah_set_sl(&ah_attr,
+ GET_TRAP_SL_FROM_CLASS_PORT_INFO(class->trap_sl_rsvd));
+ rdma_ah_set_port_num(&ah_attr, port->port_num);
+ trap_lid = be32_to_cpu(class->trap_lid);
+	/*
+	 * Check the trap lid for validity; it must not be zero.
+	 * The trap sink could change after we fashion the MAD, but since
+	 * traps are not guaranteed delivery we don't take a lock; the
+	 * change would happen anyway, even with locking.
+	 */
+ if (!trap_lid) {
+ c_err("%s: Invalid dlid\n", __func__);
+ goto err_exit;
+ }
+
+ rdma_ah_set_dlid(&ah_attr, trap_lid);
+ ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr, 0);
+ if (IS_ERR(ah)) {
+ c_err("%s:Couldn't create new AH = %p\n", __func__, ah);
+ c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__,
+ rdma_ah_get_dlid(&ah_attr), rdma_ah_get_sl(&ah_attr),
+ rdma_ah_get_port_num(&ah_attr));
+ goto err_exit;
+ }
+
+ if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_FULL,
+ &pkey_idx) < 0) {
+ c_err("%s:full key not found, defaulting to partial\n",
+ __func__);
+ if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_PARTIAL,
+ &pkey_idx) < 0)
+ pkey_idx = 1;
+ }
+
+ send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
+ IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
+ if (IS_ERR(send_buf)) {
+ c_err("%s:Couldn't allocate send buf\n", __func__);
+ goto err_sndbuf;
+ }
+
+ send_buf->ah = ah;
+
+ /* Set up common MAD hdr */
+ trap_mad = send_buf->mad;
+ trap_mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
+ trap_mad->mad_hdr.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA;
+ trap_mad->mad_hdr.class_version = OPA_EMA_CLASS_VERSION;
+ trap_mad->mad_hdr.method = IB_MGMT_METHOD_TRAP;
+ port->tid++;
+ trap_mad->mad_hdr.tid = cpu_to_be64(port->tid);
+ trap_mad->mad_hdr.attr_id = IB_SMP_ATTR_NOTICE;
+
+ /* Set up vendor OUI */
+ trap_mad->oui[0] = INTEL_OUI_1;
+ trap_mad->oui[1] = INTEL_OUI_2;
+ trap_mad->oui[2] = INTEL_OUI_3;
+
+ /* Setup notice attribute portion */
+ trap_mad->notice.gen_type = OPA_INTEL_EMA_NOTICE_TYPE_INFO << 1;
+ trap_mad->notice.oui_1 = INTEL_OUI_1;
+ trap_mad->notice.oui_2 = INTEL_OUI_2;
+ trap_mad->notice.oui_3 = INTEL_OUI_3;
+ trap_mad->notice.issuer_lid = cpu_to_be32(lid);
+
+ /* copy the actual trap data */
+ trap = (struct opa_veswport_trap *)trap_mad->notice.raw_data;
+ trap->fabric_id = cpu_to_be16(data->fabric_id);
+ trap->veswid = cpu_to_be16(data->veswid);
+ trap->veswportnum = cpu_to_be32(data->veswportnum);
+ trap->opaportnum = cpu_to_be16(data->opaportnum);
+ trap->veswportindex = data->veswportindex;
+ trap->opcode = data->opcode;
+
+	/* If the send succeeds, set up the rate-limit timeout; else bail */
+ if (ib_post_send_mad(send_buf, NULL)) {
+ ib_free_send_mad(send_buf);
+ } else {
+ if (adapter->trap_count)
+ return;
+ adapter->trap_timeout = jiffies +
+ usecs_to_jiffies(OPA_VNIC_TRAP_TIMEOUT);
+ return;
+ }
+
+err_sndbuf:
+ rdma_destroy_ah(ah, 0);
+err_exit:
+ v_err("Aborting trap\n");
+}
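+
+/*
+ * Illustrative sketch only (not driver code): the burst limiter above
+ * admits up to OPA_VNIC_TRAP_BURST_LIMIT traps within one timeout
+ * window and drops further traps until the window expires. The same
+ * policy in standalone form, with time() standing in for jiffies and
+ * hypothetical limits:
+ */
+#include <stdbool.h>
+#include <time.h>
+
+#define BURST_LIMIT	4	/* cf. OPA_VNIC_TRAP_BURST_LIMIT (assumed) */
+#define WINDOW_SEC	1	/* cf. OPA_VNIC_TRAP_TIMEOUT (assumed) */
+
+static time_t window_end;
+static unsigned int trap_count;
+
+/* Return true if a trap may be sent now. */
+static bool trap_allowed(void)
+{
+	if (time(NULL) < window_end) {
+		if (trap_count == BURST_LIMIT)
+			return false;		/* rate exceeded, drop */
+		trap_count++;
+	} else {
+		trap_count = 0;			/* window expired, restart */
+	}
+	return true;
+}
+
+/*
+ * As in opa_vnic_vema_send_trap(), the caller opens a new window only
+ * after the first successful send of a burst:
+ *
+ *	if (trap_allowed() && send_ok() && !trap_count)
+ *		window_end = time(NULL) + WINDOW_SEC;
+ */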
+
+static void opa_vnic_event(struct ib_event_handler *handler,
+ struct ib_event *record)
+{
+ struct opa_vnic_vema_port *port =
+ container_of(handler, struct opa_vnic_vema_port, event_handler);
+ struct opa_vnic_ctrl_port *cport = port->cport;
+ struct opa_vnic_adapter *adapter;
+ unsigned long index;
+
+ if (record->element.port_num != port->port_num)
+ return;
+
+ c_dbg("OPA_VNIC received event %d on device %s port %d\n",
+ record->event, dev_name(&record->device->dev),
+ record->element.port_num);
+
+ if (record->event != IB_EVENT_PORT_ERR &&
+ record->event != IB_EVENT_PORT_ACTIVE)
+ return;
+
+ xa_for_each(&port->vports, index, adapter) {
+ if (record->event == IB_EVENT_PORT_ACTIVE)
+ netif_carrier_on(adapter->netdev);
+ else
+ netif_carrier_off(adapter->netdev);
+ }
+}
+
+/**
+ * vema_unregister -- Unregisters agent
+ * @cport: pointer to control port
+ *
+ * This deletes the registration by VEMA for MADs
+ */
+static void vema_unregister(struct opa_vnic_ctrl_port *cport)
+{
+ struct opa_vnic_adapter *adapter;
+ unsigned long index;
+ int i;
+
+ for (i = 1; i <= cport->num_ports; i++) {
+ struct opa_vnic_vema_port *port = vema_get_port(cport, i);
+
+ if (!port->mad_agent)
+ continue;
+
+ /* Lock ensures no MAD is being processed */
+ mutex_lock(&port->lock);
+ xa_for_each(&port->vports, index, adapter)
+ opa_vnic_rem_netdev(adapter);
+ mutex_unlock(&port->lock);
+
+ ib_unregister_mad_agent(port->mad_agent);
+ port->mad_agent = NULL;
+ mutex_destroy(&port->lock);
+ xa_destroy(&port->vports);
+ ib_unregister_event_handler(&port->event_handler);
+ }
+}
+
+/**
+ * vema_register -- Registers agent
+ * @cport: pointer to control port
+ *
+ * This function registers the handlers for the VEMA MADs
+ *
+ * Return: returns 0 on success. non zero otherwise
+ */
+static int vema_register(struct opa_vnic_ctrl_port *cport)
+{
+ struct ib_mad_reg_req reg_req = {
+ .mgmt_class = OPA_MGMT_CLASS_INTEL_EMA,
+ .mgmt_class_version = OPA_MGMT_BASE_VERSION,
+ .oui = { INTEL_OUI_1, INTEL_OUI_2, INTEL_OUI_3 }
+ };
+ int i;
+
+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+ /* register ib event handler and mad agent for each port on dev */
+ for (i = 1; i <= cport->num_ports; i++) {
+ struct opa_vnic_vema_port *port = vema_get_port(cport, i);
+ int ret;
+
+ port->cport = cport;
+ port->port_num = i;
+
+ INIT_IB_EVENT_HANDLER(&port->event_handler,
+ cport->ibdev, opa_vnic_event);
+ ib_register_event_handler(&port->event_handler);
+
+ xa_init(&port->vports);
+ mutex_init(&port->lock);
+ port->mad_agent = ib_register_mad_agent(cport->ibdev, i,
+ IB_QPT_GSI, &reg_req,
+ IB_MGMT_RMPP_VERSION,
+ vema_send, vema_recv,
+ port, 0);
+ if (IS_ERR(port->mad_agent)) {
+ ret = PTR_ERR(port->mad_agent);
+ port->mad_agent = NULL;
+ mutex_destroy(&port->lock);
+ vema_unregister(cport);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * opa_vnic_ctrl_config_dev -- Indicate ethernet-on-fabric support
+ * to the EM by updating the port capability mask by way of
+ * ib_modify_port.
+ * @cport: pointer to control port
+ * @en: enable or disable ethernet on fabric support
+ */
+static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
+{
+ struct ib_port_modify pm = { 0 };
+ int i;
+
+ if (en)
+ pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
+ else
+ pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
+
+ for (i = 1; i <= cport->num_ports; i++)
+ ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm);
+}
+
+/**
+ * opa_vnic_vema_add_one -- Handle new ib device
+ * @device: ib device pointer
+ *
+ * Allocate the vnic control port and initialize it.
+ */
+static int opa_vnic_vema_add_one(struct ib_device *device)
+{
+ struct opa_vnic_ctrl_port *cport;
+ int rc, size = sizeof(*cport);
+
+ if (!rdma_cap_opa_vnic(device))
+ return -EOPNOTSUPP;
+
+ size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
+ cport = kzalloc(size, GFP_KERNEL);
+ if (!cport)
+ return -ENOMEM;
+
+ cport->num_ports = device->phys_port_cnt;
+ cport->ibdev = device;
+
+ /* Initialize opa vnic management agent (vema) */
+ rc = vema_register(cport);
+ if (!rc)
+ c_info("VNIC client initialized\n");
+
+ ib_set_client_data(device, &opa_vnic_client, cport);
+ opa_vnic_ctrl_config_dev(cport, true);
+ return 0;
+}
+
+/**
+ * opa_vnic_vema_rem_one -- Handle ib device removal
+ * @device: ib device pointer
+ * @client_data: ib client data
+ *
+ * Uninitialize and free the vnic control port.
+ */
+static void opa_vnic_vema_rem_one(struct ib_device *device,
+ void *client_data)
+{
+ struct opa_vnic_ctrl_port *cport = client_data;
+
+ c_info("removing VNIC client\n");
+ opa_vnic_ctrl_config_dev(cport, false);
+ vema_unregister(cport);
+ kfree(cport);
+}
+
+static int __init opa_vnic_init(void)
+{
+ int rc;
+
+ rc = ib_register_client(&opa_vnic_client);
+ if (rc)
+ pr_err("VNIC driver register failed %d\n", rc);
+
+ return rc;
+}
+module_init(opa_vnic_init);
+
+static void opa_vnic_deinit(void)
+{
+ ib_unregister_client(&opa_vnic_client);
+}
+module_exit(opa_vnic_deinit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Cornelis Networks");
+MODULE_DESCRIPTION("Cornelis OPX Virtual Network driver");
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
new file mode 100644
index 000000000..292c037aa
--- /dev/null
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains OPA VNIC EMA Interface functions.
+ */
+
+#include "opa_vnic_internal.h"
+
+/**
+ * opa_vnic_vema_report_event - send trap to report the specified event
+ * @adapter: vnic port adapter
+ * @event: event to be reported
+ *
+ * This function calls the vema api to send a trap for the given event.
+ */
+void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+ struct __opa_veswport_trap trap_data;
+
+ trap_data.fabric_id = info->vesw.fabric_id;
+ trap_data.veswid = info->vesw.vesw_id;
+ trap_data.veswportnum = info->vport.port_num;
+ trap_data.opaportnum = adapter->port_num;
+ trap_data.veswportindex = adapter->vport_num;
+ trap_data.opcode = event;
+
+ opa_vnic_vema_send_trap(adapter, &trap_data, info->vport.encap_slid);
+}
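+
+/*
+ * Hedged usage sketch (not a call site from this file): reporting an
+ * Ethernet link-state change. The opcode constant is assumed to be
+ * defined in opa_vnic_encap.h.
+ */
+static inline void example_report_link_change(struct opa_vnic_adapter *adapter)
+{
+	opa_vnic_vema_report_event(adapter,
+				   OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
+}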
+
+/**
+ * opa_vnic_get_summary_counters - get summary counters
+ * @adapter: vnic port adapter
+ * @cntrs: pointer to destination summary counters structure
+ *
+ * This function populates the destination structure with the summary
+ * counters maintained by the given adapter.
+ */
+void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_summary_counters *cntrs)
+{
+ struct opa_vnic_stats vstats;
+ __be64 *dst;
+ u64 *src;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+
+ cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
+ cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
+ cntrs->veswport_num = cpu_to_be32(adapter->port_num);
+
+ cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors);
+ cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors);
+ cntrs->tx_packets = cpu_to_be64(vstats.netstats.tx_packets);
+ cntrs->rx_packets = cpu_to_be64(vstats.netstats.rx_packets);
+ cntrs->tx_bytes = cpu_to_be64(vstats.netstats.tx_bytes);
+ cntrs->rx_bytes = cpu_to_be64(vstats.netstats.rx_bytes);
+
+	/*
+	 * This loop depends on the layouts of the
+	 * opa_veswport_summary_counters and opa_vnic_stats structures.
+	 */
+ for (dst = &cntrs->tx_unicast, src = &vstats.tx_grp.unicast;
+ dst < &cntrs->reserved[0]; dst++, src++) {
+ *dst = cpu_to_be64(*src);
+ }
+}
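+
+/*
+ * Sketch only, not present in the driver: since the loop above walks
+ * the two structures in lock-step, a compile-time guard could assert
+ * that the copied block is a whole number of 64-bit counters.
+ * tx_unicast and reserved are the loop bounds used above.
+ */
+static inline void summary_counters_layout_check(void)
+{
+	BUILD_BUG_ON((offsetof(struct opa_veswport_summary_counters, reserved) -
+		      offsetof(struct opa_veswport_summary_counters, tx_unicast)) %
+		     sizeof(__be64));
+}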
+
+/**
+ * opa_vnic_get_error_counters - get error counters
+ * @adapter: vnic port adapter
+ * @cntrs: pointer to destination error counters structure
+ *
+ * This function populates the destination structure with the error
+ * counters maintained by the given adapter.
+ */
+void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_error_counters *cntrs)
+{
+ struct opa_vnic_stats vstats;
+
+ memset(&vstats, 0, sizeof(vstats));
+ spin_lock(&adapter->stats_lock);
+ adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
+
+ cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
+ cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
+ cntrs->veswport_num = cpu_to_be32(adapter->port_num);
+
+ cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors);
+ cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors);
+ cntrs->tx_dlid_zero = cpu_to_be64(vstats.tx_dlid_zero);
+ cntrs->tx_drop_state = cpu_to_be64(vstats.tx_drop_state);
+ cntrs->tx_logic = cpu_to_be64(vstats.netstats.tx_fifo_errors +
+ vstats.netstats.tx_carrier_errors);
+
+ cntrs->rx_bad_veswid = cpu_to_be64(vstats.netstats.rx_nohandler);
+ cntrs->rx_runt = cpu_to_be64(vstats.rx_runt);
+ cntrs->rx_oversize = cpu_to_be64(vstats.rx_oversize);
+ cntrs->rx_drop_state = cpu_to_be64(vstats.rx_drop_state);
+ cntrs->rx_logic = cpu_to_be64(vstats.netstats.rx_fifo_errors);
+}
+
+/**
+ * opa_vnic_get_vesw_info -- Get the vesw information
+ * @adapter: vnic port adapter
+ * @info: pointer to destination vesw info structure
+ *
+ * This function copies the vesw info maintained by the given adapter
+ * to the destination address provided.
+ */
+void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info)
+{
+ struct __opa_vesw_info *src = &adapter->info.vesw;
+ int i;
+
+ info->fabric_id = cpu_to_be16(src->fabric_id);
+ info->vesw_id = cpu_to_be16(src->vesw_id);
+ memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0));
+ info->def_port_mask = cpu_to_be16(src->def_port_mask);
+ memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1));
+ info->pkey = cpu_to_be16(src->pkey);
+
+ memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2));
+ info->u_mcast_dlid = cpu_to_be32(src->u_mcast_dlid);
+ for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
+ info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]);
+
+ info->rc = cpu_to_be32(src->rc);
+
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+ info->eth_mtu = cpu_to_be16(src->eth_mtu);
+ memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4));
+}
+
+/**
+ * opa_vnic_set_vesw_info -- Set the vesw information
+ * @adapter: vnic port adapter
+ * @info: pointer to vesw info structure
+ *
+ * This function updates the vesw info maintained by the given adapter
+ * with the vesw info provided. Reserved fields are stored and
+ * returned to the EM as is.
+ */
+void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
+ struct opa_vesw_info *info)
+{
+ struct __opa_vesw_info *dst = &adapter->info.vesw;
+ int i;
+
+ dst->fabric_id = be16_to_cpu(info->fabric_id);
+ dst->vesw_id = be16_to_cpu(info->vesw_id);
+ memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0));
+ dst->def_port_mask = be16_to_cpu(info->def_port_mask);
+ memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1));
+ dst->pkey = be16_to_cpu(info->pkey);
+
+ memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2));
+ dst->u_mcast_dlid = be32_to_cpu(info->u_mcast_dlid);
+ for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
+ dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]);
+
+ dst->rc = be32_to_cpu(info->rc);
+
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+ dst->eth_mtu = be16_to_cpu(info->eth_mtu);
+ memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4));
+}
+
+/**
+ * opa_vnic_get_per_veswport_info -- Get the vesw per port information
+ * @adapter: vnic port adapter
+ * @info: pointer to destination vport info structure
+ *
+ * This function copies the vesw per-port info maintained by the given
+ * adapter to the destination address provided.
+ */
+void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info)
+{
+ struct __opa_per_veswport_info *src = &adapter->info.vport;
+
+ info->port_num = cpu_to_be32(src->port_num);
+ info->eth_link_status = src->eth_link_status;
+ memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0));
+
+ memcpy(info->base_mac_addr, src->base_mac_addr,
+ ARRAY_SIZE(info->base_mac_addr));
+ info->config_state = src->config_state;
+ info->oper_state = src->oper_state;
+ info->max_mac_tbl_ent = cpu_to_be16(src->max_mac_tbl_ent);
+ info->max_smac_ent = cpu_to_be16(src->max_smac_ent);
+ info->mac_tbl_digest = cpu_to_be32(src->mac_tbl_digest);
+ memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1));
+
+ info->encap_slid = cpu_to_be32(src->encap_slid);
+ memcpy(info->pcp_to_sc_uc, src->pcp_to_sc_uc,
+ ARRAY_SIZE(info->pcp_to_sc_uc));
+ memcpy(info->pcp_to_vl_uc, src->pcp_to_vl_uc,
+ ARRAY_SIZE(info->pcp_to_vl_uc));
+ memcpy(info->pcp_to_sc_mc, src->pcp_to_sc_mc,
+ ARRAY_SIZE(info->pcp_to_sc_mc));
+ memcpy(info->pcp_to_vl_mc, src->pcp_to_vl_mc,
+ ARRAY_SIZE(info->pcp_to_vl_mc));
+ info->non_vlan_sc_uc = src->non_vlan_sc_uc;
+ info->non_vlan_vl_uc = src->non_vlan_vl_uc;
+ info->non_vlan_sc_mc = src->non_vlan_sc_mc;
+ info->non_vlan_vl_mc = src->non_vlan_vl_mc;
+ memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2));
+
+ info->uc_macs_gen_count = cpu_to_be16(src->uc_macs_gen_count);
+ info->mc_macs_gen_count = cpu_to_be16(src->mc_macs_gen_count);
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+}
+
+/**
+ * opa_vnic_set_per_veswport_info -- Set vesw per port information
+ * @adapter: vnic port adapter
+ * @info: pointer to vport info structure
+ *
+ * This function updates the vesw per-port info maintained by the given
+ * adapter with the vesw per-port info provided. Read-only fields are
+ * not updated; reserved fields are stored and returned to the EM as is.
+ */
+void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter,
+ struct opa_per_veswport_info *info)
+{
+ struct __opa_per_veswport_info *dst = &adapter->info.vport;
+
+ dst->port_num = be32_to_cpu(info->port_num);
+ memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0));
+
+ memcpy(dst->base_mac_addr, info->base_mac_addr,
+ ARRAY_SIZE(dst->base_mac_addr));
+ dst->config_state = info->config_state;
+ memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1));
+
+ dst->encap_slid = be32_to_cpu(info->encap_slid);
+ memcpy(dst->pcp_to_sc_uc, info->pcp_to_sc_uc,
+ ARRAY_SIZE(dst->pcp_to_sc_uc));
+ memcpy(dst->pcp_to_vl_uc, info->pcp_to_vl_uc,
+ ARRAY_SIZE(dst->pcp_to_vl_uc));
+ memcpy(dst->pcp_to_sc_mc, info->pcp_to_sc_mc,
+ ARRAY_SIZE(dst->pcp_to_sc_mc));
+ memcpy(dst->pcp_to_vl_mc, info->pcp_to_vl_mc,
+ ARRAY_SIZE(dst->pcp_to_vl_mc));
+ dst->non_vlan_sc_uc = info->non_vlan_sc_uc;
+ dst->non_vlan_vl_uc = info->non_vlan_vl_uc;
+ dst->non_vlan_sc_mc = info->non_vlan_sc_mc;
+ dst->non_vlan_vl_mc = info->non_vlan_vl_mc;
+ memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2));
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+}
+
+/**
+ * opa_vnic_query_mcast_macs - query multicast mac list
+ * @adapter: vnic port adapter
+ * @macs: pointer to mac list
+ *
+ * This function populates the provided mac list with the configured
+ * multicast addresses in the adapter.
+ */
+void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs)
+{
+ u16 start_idx, num_macs, idx = 0, count = 0;
+ struct netdev_hw_addr *ha;
+
+ start_idx = be16_to_cpu(macs->start_idx);
+ num_macs = be16_to_cpu(macs->num_macs_in_msg);
+ netdev_for_each_mc_addr(ha, adapter->netdev) {
+ struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
+
+ if (start_idx > idx++)
+ continue;
+ else if (num_macs == count)
+ break;
+ memcpy(entry, ha->addr, sizeof(*entry));
+ count++;
+ }
+
+ macs->tot_macs_in_lst = cpu_to_be16(netdev_mc_count(adapter->netdev));
+ macs->num_macs_in_msg = cpu_to_be16(count);
+ macs->gen_count = cpu_to_be16(adapter->info.vport.mc_macs_gen_count);
+}
+
+/**
+ * opa_vnic_query_ucast_macs - query unicast mac list
+ * @adapter: vnic port adapter
+ * @macs: pointer to mac list
+ *
+ * This function populates the provided mac list with the configured
+ * unicast addresses in the adapter.
+ */
+void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs)
+{
+ u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
+ struct netdev_hw_addr *ha;
+
+ start_idx = be16_to_cpu(macs->start_idx);
+ num_macs = be16_to_cpu(macs->num_macs_in_msg);
+ /* loop through dev_addrs list first */
+ for_each_dev_addr(adapter->netdev, ha) {
+ struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
+
+ /* Do not include EM specified MAC address */
+ if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
+ ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
+ em_macs++;
+ continue;
+ }
+
+ if (start_idx > idx++)
+ continue;
+ else if (num_macs == count)
+ break;
+ memcpy(entry, ha->addr, sizeof(*entry));
+ count++;
+ }
+
+ /* loop through uc list */
+ netdev_for_each_uc_addr(ha, adapter->netdev) {
+ struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
+
+ if (start_idx > idx++)
+ continue;
+ else if (num_macs == count)
+ break;
+ memcpy(entry, ha->addr, sizeof(*entry));
+ count++;
+ }
+
+ tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
+ netdev_uc_count(adapter->netdev) - em_macs;
+ macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
+ macs->num_macs_in_msg = cpu_to_be16(count);
+ macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
+}
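+
+/*
+ * Illustrative sketch only (not driver code): both query helpers above
+ * page through an address list with the same start_idx/num_macs window.
+ * The windowing logic in standalone form, with hypothetical names:
+ */
+#include <stdio.h>
+
+/* Skip the first start_idx entries, then copy at most num_macs. */
+static int window_copy(const int *src, int n, int start_idx, int num_macs,
+		       int *dst)
+{
+	int idx = 0, count = 0, i;
+
+	for (i = 0; i < n; i++) {
+		if (start_idx > idx++)
+			continue;
+		if (count == num_macs)
+			break;
+		dst[count++] = src[i];
+	}
+	return count;	/* becomes num_macs_in_msg in the response */
+}
+
+int main(void)
+{
+	int src[] = { 10, 11, 12, 13, 14 }, dst[2];
+	int got = window_copy(src, 5, 1, 2, dst);
+
+	printf("%d entries: %d %d\n", got, dst[0], dst[1]); /* 2: 11 12 */
+	return 0;
+}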
diff --git a/drivers/infiniband/ulp/rtrs/Kconfig b/drivers/infiniband/ulp/rtrs/Kconfig
new file mode 100644
index 000000000..9092b62e6
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INFINIBAND_RTRS
+ tristate
+ depends on INFINIBAND_ADDR_TRANS
+
+config INFINIBAND_RTRS_CLIENT
+ tristate "RTRS client module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport client module.
+
+	  The RDMA Transport (RTRS) client implements a reliable transport
+	  layer with multipathing functionality. It is intended to be the
+	  base layer for a block storage initiator over RDMA.
+
+config INFINIBAND_RTRS_SERVER
+ tristate "RTRS server module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport server module.
+
+	  The RDMA Transport (RTRS) server module processes connection and
+	  IO requests received from the RTRS client module and passes the
+	  IO requests on to its user, e.g. RNBD_server.
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
new file mode 100644
index 000000000..5227e7788
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS_rtrs-clt-trace.o = -I$(src)
+
+rtrs-client-y := rtrs-clt.o \
+ rtrs-clt-stats.o \
+ rtrs-clt-sysfs.o \
+ rtrs-clt-trace.o
+
+CFLAGS_rtrs-srv-trace.o = -I$(src)
+
+rtrs-server-y := rtrs-srv.o \
+ rtrs-srv-stats.o \
+ rtrs-srv-sysfs.o \
+ rtrs-srv-trace.o
+
+rtrs-core-y := rtrs.o
+
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs-core.o
+obj-$(CONFIG_INFINIBAND_RTRS_CLIENT) += rtrs-client.o
+obj-$(CONFIG_INFINIBAND_RTRS_SERVER) += rtrs-server.o
diff --git a/drivers/infiniband/ulp/rtrs/README b/drivers/infiniband/ulp/rtrs/README
new file mode 100644
index 000000000..5d9ea142e
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/README
@@ -0,0 +1,213 @@
+****************************
+RDMA Transport (RTRS)
+****************************
+
+RTRS (RDMA Transport) is a reliable high-speed transport library
+which establishes an optimal number of connections between client and
+server machines using RDMA (InfiniBand, RoCE, iWARP) transport. It is
+optimized to transfer (read/write) IO blocks.
+
+In its core interface it follows the BIO semantics of providing the
+possibility to either write data from an sg list to the remote side
+or to request ("read") data transfer from the remote side into a given
+sg list.
+
+RTRS provides I/O fail-over and load-balancing capabilities by using
+multipath I/O (see "add_path" and "mp_policy" configuration entries in
+Documentation/ABI/testing/sysfs-class-rtrs-client).
+
+RTRS is used by the RNBD (RDMA Network Block Device) modules.
+
+==================
+Transport protocol
+==================
+
+Overview
+--------
+An established connection between a client and a server is called an rtrs
+session. A session is associated with a set of memory chunks reserved on the
+server side for a given client for rdma transfer. A session
+consists of multiple paths, each representing a separate physical link
+between client and server. Those are used for load balancing and failover.
+Each path consists of as many connections (QPs) as there are CPUs on
+the client. For example, a client with 8 CPUs and two physical links between
+client and server ends up with two paths of 8 QPs each.
+
+When processing an incoming write or read request, the rtrs client uses memory
+chunks reserved for it on the server side. Their number, size and addresses
+need to be exchanged between client and server during the connection
+establishment phase. Apart from the memory-related information, the client
+needs to inform the server about the session name and to identify each path
+and connection individually.
+
+On an established session the client sends write or read messages to the
+server. The server uses the immediate field to tell the client which request
+is being acknowledged and to carry the errno. The client uses the immediate
+field to tell the server which of the memory chunks has been accessed and at
+which offset the message can be found.
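+
+As a purely illustrative example (this is NOT the encoding rtrs actually
+uses -- see rtrs-pri.h for that), a single 32 bit immediate can carry
+both a chunk id and an offset once the chunk size is fixed:
+
+	#include <stdint.h>
+
+	#define MAX_IO_SIZE 4096u	/* hypothetical chunk size */
+
+	static uint32_t to_imm(uint32_t chunk_id, uint32_t off)
+	{
+		return chunk_id * MAX_IO_SIZE + off;
+	}
+
+	static void from_imm(uint32_t imm, uint32_t *chunk_id, uint32_t *off)
+	{
+		*chunk_id = imm / MAX_IO_SIZE;
+		*off = imm % MAX_IO_SIZE;
+	}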
+
+The module parameter always_invalidate addresses the security problem
+discussed at LPC RDMA MC 2019. When always_invalidate=Y, on the server side we
+invalidate each rdma buffer before we hand it over to the RNBD server and
+then pass it to the block layer. A new rkey is generated and registered for
+the buffer after it returns from the block layer and the RNBD server.
+The new rkey is sent back to the client along with the IO result.
+This procedure is the default behaviour of the driver. The invalidation and
+registration on each IO causes a performance drop of up to 20%. A user of the
+driver may choose to load the modules with this mechanism switched off
+(always_invalidate=N) if they understand and can take the risk of a malicious
+client being able to corrupt the memory of a server it is connected to. This
+might be a reasonable option in a scenario where all the clients and all the
+servers are located within a secure datacenter.
+
+
+Connection establishment
+------------------------
+
+1. The client establishes the connections belonging to a path of a session one
+by one by attaching RTRS_MSG_CON_REQ messages to the rdma_connect requests.
+Those include the uuid of the session and the uuid of the path to be
+established. They are used by the server to find a persisting session/path or
+to create a new one when necessary. The message also contains the protocol
+version and magic for compatibility, the total number of connections per
+session (as many as CPUs on the client), the id of the current connection and
+the reconnect counter, which is used to resolve situations where the
+client is trying to reconnect a path while the server is still destroying the
+old one.
+
+2. The server accepts the connection requests one by one and attaches
+RTRS_MSG_CON_RSP messages to the rdma_accept. Apart from magic and
+protocol version, the messages include an error code, the queue depth
+supported by the server (the number of memory chunks which are going to be
+allocated for that session) and the maximum size of one IO. The
+RTRS_MSG_NEW_RKEY_F flag is set when always_invalidate=Y.
+
+3. After all connections of a path are established, the client sends the
+server the RTRS_MSG_INFO_REQ message, containing the name of the session.
+This message requests the address information from the server.
+
+4. Server replies to the session info request message with RTRS_MSG_INFO_RSP,
+which contains the addresses and keys of the RDMA buffers allocated for that
+session.
+
+5. A session becomes connected after all paths to be established are connected
+(i.e. steps 1-4 have finished for all paths requested for the session).
+
+6. Server and client periodically exchange heartbeat messages (empty rdma
+messages with an immediate field), which are used to detect a crash on the
+remote side or a network outage in the absence of IO.
+
+7. On any RDMA-related error or in the case of a heartbeat timeout, the
+corresponding path is disconnected, all the inflight IO are failed over to a
+healthy path, if any, and the reconnect mechanism is triggered.
+
+CLT SRV
+*for each connection belonging to a path and for each path:
+RTRS_MSG_CON_REQ ------------------->
+ <------------------- RTRS_MSG_CON_RSP
+...
+*after all connections are established:
+RTRS_MSG_INFO_REQ ------------------->
+ <------------------- RTRS_MSG_INFO_RSP
+*heartbeat is started from both sides:
+ -------------------> [RTRS_HB_MSG_IMM]
+[RTRS_HB_MSG_ACK] <-------------------
+[RTRS_HB_MSG_IMM] <-------------------
+ -------------------> [RTRS_HB_MSG_ACK]
+
+IO path
+-------
+
+* Write (always_invalidate=N) *
+
+1. When processing a write request, the client selects one of the memory
+chunks on the server side and rdma writes the user data, the user header and
+the RTRS_MSG_RDMA_WRITE message there. Apart from the type (write), the
+message only contains the size of the user header. The client tells the
+server which chunk has been accessed and at what offset the
+RTRS_MSG_RDMA_WRITE can be found by using the IMM field.
+
+2. When confirming a write request, the server sends an "empty" rdma message
+with an immediate field. The 32-bit field is used to identify the outstanding
+inflight IO and to carry the error code.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+* Write (always_invalidate=Y) *
+
+1. When processing a write request, the client selects one of the memory
+chunks on the server side and rdma writes the user data, the user header and
+the RTRS_MSG_RDMA_WRITE message there. Apart from the type (write), the
+message only contains the size of the user header. The client tells the
+server which chunk has been accessed and at what offset the
+RTRS_MSG_RDMA_WRITE can be found by using the IMM field. The server first
+invalidates the rkey associated with the memory chunk; when that finishes, it
+passes the IO to the RNBD server module.
+
+2. When confirming a write request, the server sends an "empty" rdma message
+with an immediate field. The 32-bit field is used to identify the outstanding
+inflight IO and to carry the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer, finishes the IO, and then
+posts the recv buffer back for later use.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+
+* Read (always_invalidate=N)*
+
+1. When processing a read request, the client selects one of the memory
+chunks on the server side and rdma writes the user header and the
+RTRS_MSG_RDMA_READ message there. This message contains the type (read), the
+size of the user header, flags (specifying if memory invalidation is
+necessary) and the list of addresses along with keys for the data to be read
+into.
+
+2. When confirming a read request, the server transfers the requested data
+first, attaches an invalidation message if requested, and finally sends an
+"empty" rdma message with an immediate field. The 32-bit field is used to
+identify the outstanding inflight IO and to carry the error code.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+* Read (always_invalidate=Y)*
+
+1. When processing a read request, the client selects one of the memory
+chunks on the server side and rdma writes the user header and the
+RTRS_MSG_RDMA_READ message there. This message contains the type (read), the
+size of the user header, flags (specifying if memory invalidation is
+necessary) and the list of addresses along with keys for the data to be read
+into. The server first invalidates the rkey associated with the memory chunk;
+when that finishes, it passes the IO to the RNBD server module.
+
+2. When confirming a read request, the server transfers the requested data
+first, attaches an invalidation message if requested, and finally sends an
+"empty" rdma message with an immediate field. The 32-bit field is used to
+identify the outstanding inflight IO and to carry the error code. The new
+rkey is sent back using a SEND_WITH_IMM WR. When the client receives the new
+rkey message, it validates the message, updates the rkey for the rbuffer,
+finishes the IO, and then posts the recv buffer back for later use.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
new file mode 100644
index 000000000..1e6ffafa2
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-clt.h"
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+ s = get_cpu_ptr(stats->pcpu_stats);
+ if (con->cpu != cpu) {
+ s->cpu_migr.to++;
+
+		/* Careful here, we overwrite the s pointer */
+ s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
+ atomic_inc(&s->cpu_migr.from);
+ }
+ put_cpu_ptr(stats->pcpu_stats);
+}
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
+{
+ this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
+}
+
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+	struct rtrs_clt_stats_pcpu *s;
+	size_t used;
+	int cpu;
+
+ used = 0;
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += sysfs_emit_at(buf, used, "%d ",
+ atomic_read(&s->cpu_migr.from));
+ }
+
+ used += sysfs_emit_at(buf, used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+	struct rtrs_clt_stats_pcpu *s;
+	size_t used;
+	int cpu;
+
+ used = 0;
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
+ }
+
+ used += sysfs_emit_at(buf, used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+ return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
+}
+
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
+{
+ struct rtrs_clt_stats_rdma sum;
+ struct rtrs_clt_stats_rdma *r;
+ int cpu;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ sum.failover_cnt += r->failover_cnt;
+ }
+
+ return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
+ atomic_read(&stats->inflight), sum.failover_cnt);
+}
+
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
+{
+ return sysfs_emit(page, "echo 1 to reset all statistics\n");
+}
+
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->rdma, 0, sizeof(s->rdma));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
+{
+ if (!enable)
+ return -EINVAL;
+
+ memset(&stats->reconnects, 0, sizeof(stats->reconnects));
+
+ return 0;
+}
+
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
+{
+ if (enable) {
+ rtrs_clt_reset_rdma_stats(s, enable);
+ rtrs_clt_reset_cpu_migr_stats(s, enable);
+ rtrs_clt_reset_reconnects_stat(s, enable);
+ atomic_set(&s->inflight, 0);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
+ size_t size, int d)
+{
+ this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
+ this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
+}
+
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
+ unsigned int len;
+
+ len = req->usr_len + req->data_len;
+ rtrs_clt_update_rdma_stats(stats, len, dir);
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_inc(&stats->inflight);
+}
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
+{
+ stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
+ if (!stats->pcpu_stats)
+ return -ENOMEM;
+
+ /*
+ * successful_cnt will be set to 0 after session
+ * is established for the first time
+ */
+ stats->reconnects.successful_cnt = -1;
+
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
new file mode 100644
index 000000000..d3c436ead
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define MIN_MAX_RECONN_ATT -1
+#define MAX_MAX_RECONN_ATT 9999
+
+static void rtrs_clt_path_release(struct kobject *kobj)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+
+ free_path(clt_path);
+}
+
+static struct kobj_type ktype_sess = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_path_release
+};
+
+static void rtrs_clt_path_stats_release(struct kobject *kobj)
+{
+ struct rtrs_clt_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
+
+ free_percpu(stats->pcpu_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_path_stats_release,
+};
+
+static ssize_t max_reconnect_attempts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
+
+ return sysfs_emit(page, "%d\n",
+ rtrs_clt_get_max_reconnect_attempts(clt));
+}
+
+static ssize_t max_reconnect_attempts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int value;
+ int ret;
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret) {
+ rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
+ attr->attr.name, buf);
+ return ret;
+ }
+ if (value > MAX_MAX_RECONN_ATT ||
+ value < MIN_MAX_RECONN_ATT) {
+ rtrs_err(clt,
+ "%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
+ attr->attr.name, buf, MIN_MAX_RECONN_ATT,
+ MAX_MAX_RECONN_ATT);
+ return -EINVAL;
+ }
+ rtrs_clt_set_max_reconnect_attempts(clt, value);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_reconnect_attempts);
+
+static ssize_t mpath_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *clt;
+
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
+
+ switch (clt->mp_policy) {
+ case MP_POLICY_RR:
+ return sysfs_emit(page, "round-robin (RR: %d)\n",
+ clt->mp_policy);
+ case MP_POLICY_MIN_INFLIGHT:
+ return sysfs_emit(page, "min-inflight (MI: %d)\n",
+ clt->mp_policy);
+ case MP_POLICY_MIN_LATENCY:
+ return sysfs_emit(page, "min-latency (ML: %d)\n",
+ clt->mp_policy);
+ default:
+ return sysfs_emit(page, "Unknown (%d)\n", clt->mp_policy);
+ }
+}
+
+static ssize_t mpath_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rtrs_clt_sess *clt;
+ int value;
+ int ret;
+ size_t len = 0;
+
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (!ret && (value == MP_POLICY_RR ||
+ value == MP_POLICY_MIN_INFLIGHT ||
+ value == MP_POLICY_MIN_LATENCY)) {
+ clt->mp_policy = value;
+ return count;
+ }
+
+	/* use length to distinguish "mi"/"ml" from the full policy names */
+ len = strnlen(buf, NAME_MAX);
+ if (buf[len - 1] == '\n')
+ len--;
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+ (len == 2 && !strncasecmp(buf, "rr", 2)))
+ clt->mp_policy = MP_POLICY_RR;
+ else if (!strncasecmp(buf, "min-inflight", 12) ||
+ (len == 2 && !strncasecmp(buf, "mi", 2)))
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else if (!strncasecmp(buf, "min-latency", 11) ||
+ (len == 2 && !strncasecmp(buf, "ml", 2)))
+ clt->mp_policy = MP_POLICY_MIN_LATENCY;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mpath_policy);
+
+static ssize_t add_path_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ return sysfs_emit(page,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static ssize_t add_path_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sockaddr_storage srcaddr, dstaddr;
+ struct rtrs_addr addr = {
+ .src = &srcaddr,
+ .dst = &dstaddr
+ };
+ struct rtrs_clt_sess *clt;
+ const char *nl;
+ size_t len;
+ int err;
+
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
+
+ nl = strchr(buf, '\n');
+ if (nl)
+ len = nl - buf;
+ else
+ len = count;
+ err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
+ if (err)
+ return -EINVAL;
+
+ err = rtrs_clt_create_path_from_sysfs(clt, &addr);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(add_path);
+
+static ssize_t rtrs_clt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (clt_path->state == RTRS_CLT_CONNECTED)
+ return sysfs_emit(page, "connected\n");
+
+ return sysfs_emit(page, "disconnected\n");
+}
+
+static struct kobj_attribute rtrs_clt_state_attr =
+ __ATTR(state, 0444, rtrs_clt_state_show, NULL);
+
+static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_path *clt_path;
+ int ret;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_reconnect_from_sysfs(clt_path);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_reconnect_attr =
+ __ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
+ rtrs_clt_reconnect_store);
+
+static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ rtrs_clt_close_conns(clt_path, true);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_disconnect_attr =
+ __ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
+ rtrs_clt_disconnect_store);
+
+static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_path *clt_path;
+ int ret;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_remove_path_attr =
+ __ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
+ rtrs_clt_remove_path_store);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_from,
+ rtrs_clt_stats_migration_from_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_to,
+ rtrs_clt_stats_migration_to_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reconnects,
+ rtrs_clt_stats_reconnects_to_str,
+ rtrs_clt_reset_reconnects_stat);
+
+STAT_ATTR(struct rtrs_clt_stats, rdma,
+ rtrs_clt_stats_rdma_to_str,
+ rtrs_clt_reset_rdma_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reset_all,
+ rtrs_clt_reset_all_help,
+ rtrs_clt_reset_all_stats);
+
+static struct attribute *rtrs_clt_stats_attrs[] = {
+ &cpu_migration_from_attr.attr,
+ &cpu_migration_to_attr.attr,
+ &reconnects_attr.attr,
+ &rdma_attr.attr,
+ &reset_all_attr.attr,
+ NULL
+};
+
+static const struct attribute_group rtrs_clt_stats_attr_group = {
+ .attrs = rtrs_clt_stats_attrs,
+};
+
+static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, typeof(*clt_path), kobj);
+
+ return sysfs_emit(page, "%u\n", clt_path->hca_port);
+}
+
+static struct kobj_attribute rtrs_clt_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
+
+static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+
+ return sysfs_emit(page, "%s\n", clt_path->hca_name);
+}
+
+static struct kobj_attribute rtrs_clt_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+
+static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+
+ return sysfs_emit(page, "%lld ns\n",
+ ktime_to_ns(clt_path->s.hb_cur_latency));
+}
+
+static struct kobj_attribute rtrs_clt_cur_latency_attr =
+ __ATTR(cur_latency, 0444, rtrs_clt_cur_latency_show, NULL);
+
+static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+ int len;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
+}
+
+static struct kobj_attribute rtrs_clt_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
+
+static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_path *clt_path;
+ int len;
+
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
+}
+
+static struct kobj_attribute rtrs_clt_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
+
+static struct attribute *rtrs_clt_path_attrs[] = {
+ &rtrs_clt_hca_name_attr.attr,
+ &rtrs_clt_hca_port_attr.attr,
+ &rtrs_clt_src_addr_attr.attr,
+ &rtrs_clt_dst_addr_attr.attr,
+ &rtrs_clt_state_attr.attr,
+ &rtrs_clt_reconnect_attr.attr,
+ &rtrs_clt_disconnect_attr.attr,
+ &rtrs_clt_remove_path_attr.attr,
+ &rtrs_clt_cur_latency_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_clt_path_attr_group = {
+ .attrs = rtrs_clt_path_attrs,
+};
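+
+/*
+ * For orientation, a sketch of the resulting per-path sysfs layout (the
+ * exact root depends on how the client device and the "paths" kobject are
+ * registered, see rtrs_clt_create_path_files() below):
+ *
+ *   .../<session>/paths/<src@dst>/
+ *       hca_name  hca_port  src_addr  dst_addr  state  cur_latency
+ *       reconnect  disconnect  remove_path
+ *       stats/{cpu_migration_from,cpu_migration_to,reconnects,rdma,reset_all}
+ */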
+
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ char str[NAME_MAX];
+ int err;
+ struct rtrs_addr path = {
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
+ clt->kobj_paths,
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&clt_path->kobj);
+ return err;
+ }
+ err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
+ &clt_path->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ kobject_put(&clt_path->stats->kobj_stats);
+ goto remove_group;
+ }
+
+ err = sysfs_create_group(&clt_path->stats->kobj_stats,
+ &rtrs_clt_stats_attr_group);
+ if (err) {
+ pr_err("failed to create stats sysfs group, err: %d\n", err);
+ goto put_kobj_stats;
+ }
+
+ return 0;
+
+put_kobj_stats:
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
+remove_group:
+ sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
+put_kobj:
+ kobject_del(&clt_path->kobj);
+ kobject_put(&clt_path->kobj);
+
+ return err;
+}
+
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
+ const struct attribute *sysfs_self)
+{
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
+ if (sysfs_self)
+ sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
+ kobject_del(&clt_path->kobj);
+}
+
+static struct attribute *rtrs_clt_attrs[] = {
+ &dev_attr_max_reconnect_attempts.attr,
+ &dev_attr_mpath_policy.attr,
+ &dev_attr_add_path.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_clt_attr_group = {
+ .attrs = rtrs_clt_attrs,
+};
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
+{
+ return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
+
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
+{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+
+ if (clt->kobj_paths) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ }
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
new file mode 100644
index 000000000..f14fa1f36
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-clt.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-clt-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
new file mode 100644
index 000000000..7738e2676
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_clt
+
+#if !defined(_TRACE_RTRS_CLT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_CLT_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_clt_path;
+struct rtrs_clt_sess;
+
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING_ERR);
+TRACE_DEFINE_ENUM(RTRS_CLT_RECONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSED);
+TRACE_DEFINE_ENUM(RTRS_CLT_DEAD);
+
+#define show_rtrs_clt_state(x) \
+ __print_symbolic(x, \
+ { RTRS_CLT_CONNECTING, "CONNECTING" }, \
+ { RTRS_CLT_CONNECTING_ERR, "CONNECTING_ERR" }, \
+ { RTRS_CLT_RECONNECTING, "RECONNECTING" }, \
+ { RTRS_CLT_CONNECTED, "CONNECTED" }, \
+ { RTRS_CLT_CLOSING, "CLOSING" }, \
+ { RTRS_CLT_CLOSED, "CLOSED" }, \
+ { RTRS_CLT_DEAD, "DEAD" })
+
+DECLARE_EVENT_CLASS(rtrs_clt_conn_class,
+ TP_PROTO(struct rtrs_clt_path *clt_path),
+
+ TP_ARGS(clt_path),
+
+ TP_STRUCT__entry(
+ __field(int, state)
+ __field(int, reconnect_attempts)
+ __field(int, max_reconnect_attempts)
+ __field(int, fail_cnt)
+ __field(int, success_cnt)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ __entry->state = clt_path->state;
+ __entry->reconnect_attempts = clt_path->reconnect_attempts;
+ __entry->max_reconnect_attempts = clt->max_reconnect_attempts;
+ __entry->fail_cnt = clt_path->stats->reconnects.fail_cnt;
+ __entry->success_cnt = clt_path->stats->reconnects.successful_cnt;
+ memcpy(__entry->sessname, kobject_name(&clt_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("RTRS-CLT: sess='%s' state=%s attempts='%d' max-attempts='%d' fail='%d' success='%d'",
+ __entry->sessname,
+ show_rtrs_clt_state(__entry->state),
+ __entry->reconnect_attempts,
+ __entry->max_reconnect_attempts,
+ __entry->fail_cnt,
+ __entry->success_cnt
+ )
+);
+
+#define DEFINE_CLT_CONN_EVENT(name) \
+DEFINE_EVENT(rtrs_clt_conn_class, rtrs_##name, \
+ TP_PROTO(struct rtrs_clt_path *clt_path), \
+ TP_ARGS(clt_path))
+
+DEFINE_CLT_CONN_EVENT(clt_reconnect_work);
+DEFINE_CLT_CONN_EVENT(clt_close_conns);
+DEFINE_CLT_CONN_EVENT(rdma_error_recovery);
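+
+/*
+ * Once CREATE_TRACE_POINTS has pulled this header in (see
+ * rtrs-clt-trace.c), these events can typically be enabled at run time
+ * through tracefs, e.g.:
+ *
+ *	echo 1 > /sys/kernel/tracing/events/rtrs_clt/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ *
+ * (paths may differ depending on where tracefs is mounted).
+ */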
+
+#endif /* _TRACE_RTRS_CLT_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-clt-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
new file mode 100644
index 000000000..cc07c91f9
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -0,0 +1,3180 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/random.h>
+
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+#include "rtrs-clt-trace.h"
+
+#define RTRS_CONNECT_TIMEOUT_MS 30000
+/*
+ * Wait a bit before trying to reconnect after a failure
+ * in order to give the server time to finish its cleanup;
+ * reconnecting too early leads to "false positive" failed
+ * reconnect attempts.
+ */
+#define RTRS_RECONNECT_BACKOFF 1000
+/*
+ * Add up to RTRS_RECONNECT_SEED milliseconds of random jitter
+ * to the reconnect delay, so that clients do not all reconnect
+ * at once after a major network outage.
+ */
+#define RTRS_RECONNECT_SEED 8
+
+#define FIRST_CONN 0x01
+/* limit to 128 * 4k = 512k max IO */
+#define RTRS_MAX_SEGMENTS 128
+
+MODULE_DESCRIPTION("RDMA Transport Client");
+MODULE_LICENSE("GPL");
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+
+static struct workqueue_struct *rtrs_wq;
+static struct class *rtrs_clt_dev_class;
+
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
+{
+ struct rtrs_clt_path *clt_path;
+ bool connected = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
+ connected = true;
+ break;
+ }
+ rcu_read_unlock();
+
+ return connected;
+}
+
+static struct rtrs_permit *
+__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
+{
+ size_t max_depth = clt->queue_depth;
+ struct rtrs_permit *permit;
+ int bit;
+
+ /*
+ * Adapted from null_blk get_tag(). Callers from different cpus may
+ * grab the same bit, since find_first_zero_bit is not atomic.
+ * But then the test_and_set_bit_lock will fail for all the
+ * callers but one, so that they will loop again.
+ * This way an explicit spinlock is not required.
+ */
+ do {
+ bit = find_first_zero_bit(clt->permits_map, max_depth);
+ if (bit >= max_depth)
+ return NULL;
+ } while (test_and_set_bit_lock(bit, clt->permits_map));
+
+ permit = get_permit(clt, bit);
+ WARN_ON(permit->mem_id != bit);
+ permit->cpu_id = raw_smp_processor_id();
+ permit->con_type = con_type;
+
+ return permit;
+}
+
+static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
+{
+ clear_bit_unlock(permit->mem_id, clt->permits_map);
+}
+
+/**
+ * rtrs_clt_get_permit() - allocates permit for future RDMA operation
+ * @clt: Current session
+ * @con_type: Type of connection to use with the permit
+ * @can_wait: Wait type
+ *
+ * Description:
+ * Allocates permit for the following RDMA operation. Permit is used
+ * to preallocate all resources and to propagate memory pressure
+ * up earlier.
+ *
+ * Context:
+ * Can sleep if @can_wait == RTRS_PERMIT_WAIT
+ */
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
+ enum rtrs_clt_con_type con_type,
+ enum wait_type can_wait)
+{
+ struct rtrs_permit *permit;
+ DEFINE_WAIT(wait);
+
+ permit = __rtrs_get_permit(clt, con_type);
+ if (permit || !can_wait)
+ return permit;
+
+ do {
+ prepare_to_wait(&clt->permits_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ permit = __rtrs_get_permit(clt, con_type);
+ if (permit)
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&clt->permits_wait, &wait);
+
+ return permit;
+}
+EXPORT_SYMBOL(rtrs_clt_get_permit);
+
+/**
+ * rtrs_clt_put_permit() - puts allocated permit
+ * @clt: Current session
+ * @permit: Permit to be freed
+ *
+ * Context:
+ * Does not matter
+ */
+void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
+{
+ if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
+ return;
+
+ __rtrs_put_permit(clt, permit);
+
+ /*
+ * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
+ * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
+ * it must have added itself to &clt->permits_wait before
+ * __rtrs_put_permit() finished.
+ * Hence it is safe to guard wake_up() with a waitqueue_active() test.
+ */
+ if (waitqueue_active(&clt->permits_wait))
+ wake_up(&clt->permits_wait);
+}
+EXPORT_SYMBOL(rtrs_clt_put_permit);
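+
+/*
+ * A minimal usage sketch of the permit API. The request call in the
+ * middle is illustrative only; rtrs_clt_get_permit() and
+ * rtrs_clt_put_permit() are the real entry points:
+ *
+ *	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
+ *	if (!permit)
+ *		return -ENOMEM;	(only possible with RTRS_PERMIT_NOWAIT)
+ *	... issue the IO, e.g. via rtrs_clt_request() ...
+ *	rtrs_clt_put_permit(clt, permit);
+ */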
+
+/**
+ * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
+ * @clt_path: client path pointer
+ * @permit: permit for the allocation of the RDMA buffer
+ * Note:
+ *     IO connections start from 1;
+ *     connection 0 is reserved for user messages.
+ */
+static
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
+ struct rtrs_permit *permit)
+{
+ int id = 0;
+
+ if (permit->con_type == RTRS_IO_CON)
+ id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
+
+ return to_clt_con(clt_path->s.con[id]);
+}
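+
+/*
+ * Worked example: with s.irq_con_num == 5 (one user connection plus four
+ * IO connections), a permit taken on cpu_id == 6 maps to connection
+ * (6 % 4) + 1 == 3, while any RTRS_ADMIN_CON permit maps to connection 0.
+ */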
+
+/**
+ * rtrs_clt_change_state() - change the session state through session state
+ * machine.
+ *
+ * @clt_path: client path to change the state of.
+ * @new_state: state to change to.
+ *
+ * Return: true if the path state was changed to @new_state, otherwise false.
+ *
+ * Locks:
+ * state_wq lock must be held.
+ */
+static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&clt_path->state_wq.lock);
+
+ old_state = clt_path->state;
+ switch (new_state) {
+ case RTRS_CLT_CONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_RECONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_RECONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTED:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTED:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTING_ERR:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_RECONNECTING:
+ case RTRS_CLT_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSED:
+ switch (old_state) {
+ case RTRS_CLT_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_DEAD:
+ switch (old_state) {
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed) {
+ clt_path->state = new_state;
+ wake_up_locked(&clt_path->state_wq);
+ }
+
+ return changed;
+}
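+
+/*
+ * Summary of the transitions accepted above (all others are rejected):
+ *
+ *   RECONNECTING                          -> CONNECTING
+ *   CONNECTED | CONNECTING_ERR | CLOSED   -> RECONNECTING
+ *   CONNECTING                            -> CONNECTED
+ *   CONNECTING                            -> CONNECTING_ERR
+ *   CONNECTING | CONNECTING_ERR |
+ *   RECONNECTING | CONNECTED              -> CLOSING
+ *   CLOSING                               -> CLOSED
+ *   CLOSED                                -> DEAD
+ */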
+
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
+ enum rtrs_clt_state old_state,
+ enum rtrs_clt_state new_state)
+{
+ bool changed = false;
+
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (clt_path->state == old_state)
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
+static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ trace_rtrs_rdma_error_recovery(clt_path);
+
+ if (rtrs_clt_change_state_from_to(clt_path,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_RECONNECTING)) {
+ queue_work(rtrs_wq, &clt_path->err_recovery_work);
+ } else {
+ /*
+ * Otherwise the error can only have happened while establishing
+ * a new connection, so notify the waiter via the error state;
+ * the waiter is responsible for cleaning up the rest and
+ * reconnecting if needed.
+ */
+ rtrs_clt_change_state_from_to(clt_path,
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR);
+ }
+}
+
+static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static struct ib_cqe fast_reg_cqe = {
+ .done = rtrs_clt_fast_reg_done
+};
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait);
+
+static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_io_req *req =
+ container_of(wc->wr_cqe, typeof(*req), inv_cqe);
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ req->need_inv = false;
+ if (req->need_inv_comp)
+ complete(&req->inv_comp);
+ else
+ /* Complete request from INV callback */
+ complete_rdma_req(req, req->inv_errno, true, false);
+}
+
+static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+ if (!req->in_use)
+ return;
+ if (WARN_ON(!req->con))
+ return;
+ clt_path = to_clt_path(con->c.path);
+
+ if (req->sg_cnt) {
+ if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
+ /*
+ * We are here to invalidate read requests
+ * ourselves. In the normal scenario the server
+ * sends an INV for every read request, so if we
+ * got here one of two things has happened:
+ *
+ * 1. this is a failover, when errno != 0
+ * and can_wait == 1,
+ *
+ * 2. something went totally wrong and the
+ * server forgot to send the INV, so we
+ * must do it ourselves.
+ */
+
+ if (can_wait) {
+ req->need_inv_comp = true;
+ } else {
+ /* This should be IO path, so always notify */
+ WARN_ON(!notify);
+ /* Save errno for INV callback */
+ req->inv_errno = errno;
+ }
+
+ refcount_inc(&req->ref);
+ err = rtrs_inv_rkey(req);
+ if (err) {
+ rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
+ req->mr->rkey, err);
+ } else if (can_wait) {
+ wait_for_completion(&req->inv_comp);
+ } else {
+ /*
+ * Something went wrong, so request will be
+ * completed from INV callback.
+ */
+ WARN_ON_ONCE(1);
+
+ return;
+ }
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ }
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&clt_path->stats->inflight);
+
+ req->in_use = false;
+ req->con = NULL;
+
+ if (errno) {
+ rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port, notify);
+ }
+
+ if (notify)
+ req->conf(req->priv, errno);
+}
+
+static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, u32 off,
+ u32 imm, struct ib_send_wr *wr)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ enum ib_send_flags flags;
+ struct ib_sge sge;
+
+ if (!req->sg_size) {
+ rtrs_wrn(con->c.path,
+ "Doing RDMA Write failed, no data supplied\n");
+ return -EINVAL;
+ }
+
+ /* user data and user message in the first list element */
+ sge.addr = req->iu->dma_addr;
+ sge.length = req->sg_size;
+ sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
+ req->sg_size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
+ rbuf->rkey, rbuf->addr + off,
+ imm, flags, wr, NULL);
+}
+
+static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
+ s16 errno, bool w_inval)
+{
+ struct rtrs_clt_io_req *req;
+
+ if (WARN_ON(msg_id >= clt_path->queue_depth))
+ return;
+
+ req = &clt_path->reqs[msg_id];
+ /* Drop need_inv if server responded with send with invalidation */
+ req->need_inv &= !w_inval;
+ complete_rdma_req(req, errno, true, false);
+}
+
+static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_iu *iu;
+ int err;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ cqe);
+ err = rtrs_iu_post_recv(&con->c, iu);
+ if (err) {
+ rtrs_err(con->c.path, "post iu failed %d\n", err);
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_msg_rkey_rsp *msg;
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ struct rtrs_iu *iu;
+ u32 buf_id;
+ int err;
+
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+
+ if (wc->byte_len < sizeof(*msg)) {
+ rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
+ rtrs_err(clt_path->clt,
+ "rkey response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ buf_id = le16_to_cpu(msg->buf_id);
+ if (WARN_ON(buf_id >= clt_path->queue_depth))
+ goto out;
+
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+ if (imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ if (WARN_ON(buf_id != msg_id))
+ goto out;
+ clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
+ }
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ return rtrs_clt_recv_done(con, wc);
+out:
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_clt_rdma_done
+};
+
+/*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr_arr[2], *wr;
+ int i;
+
+ memset(wr_arr, 0, sizeof(wr_arr));
+ for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
+ wr = &wr_arr[i];
+ wr->wr_cqe = cqe;
+ if (i)
+ /* Chain backwards */
+ wr->next = &wr_arr[i - 1];
+ }
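+ /*
+ * wr now points at wr_arr[1], which is chained back to wr_arr[0],
+ * so the single ib_post_recv() below posts both WRs.
+ */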
+
+ return ib_post_recv(con->qp, wr, NULL);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ int err;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(clt_path->clt, "RDMA failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ return;
+ }
+ rtrs_clt_update_wc_stats(con);
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ process_io_rsp(clt_path, msg_id, err, w_inval);
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&clt_path->s);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ clt_path->s.hb_missed_cnt = 0;
+ clt_path->s.hb_cur_latency =
+ ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else {
+ rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
+ imm_type);
+ }
+ if (w_inval)
+ /*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+ err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
+ else
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (err) {
+ rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
+ err);
+ rtrs_rdma_error_recovery(con);
+ }
+ break;
+ case IB_WC_RECV:
+ /*
+ * Key invalidations from server side
+ */
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ return rtrs_clt_recv_done(con, wc);
+
+ return rtrs_clt_rkey_rsp_done(con, wc);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb.
+ */
+ break;
+
+ default:
+ rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+{
+ int err, i;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ for (i = 0; i < q_size; i++) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
+ struct rtrs_iu *iu = &con->rsp_ius[i];
+
+ err = rtrs_iu_post_recv(&con->c, iu);
+ } else {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ }
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_path(struct rtrs_clt_path *clt_path)
+{
+ size_t q_size = 0;
+ int err, cid;
+
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = clt_path->queue_depth;
+
+ /*
+ * x2 for RDMA read responses + FR key invalidations,
+ * RDMA writes do not require any FR registrations.
+ */
+ q_size *= 2;
+
+ err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
+ if (err) {
+ rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct path_it {
+ int i;
+ struct list_head skip_list;
+ struct rtrs_clt_sess *clt;
+ struct rtrs_clt_path *(*next_path)(struct path_it *it);
+};
+
+/*
+ * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
+ * @head: the head for the list.
+ * @clt_path: The element to take the next clt_path from.
+ *
+ * The next clt path is returned in round-robin fashion (the list head is
+ * skipped, wrapping around to the first element); if the list is observed
+ * to be empty, NULL is returned.
+ *
+ * This function may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+static inline struct rtrs_clt_path *
+rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
+{
+ return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
+ list_next_or_null_rcu(head,
+ READ_ONCE((&clt_path->s.entry)->next),
+ typeof(*clt_path), s.entry);
+}
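+
+/*
+ * Example: for a paths_list A -> B -> C, next(A) == B and next(C) wraps
+ * around to A (the list head itself is never returned); on an empty list
+ * both lookups fail and NULL is returned.
+ */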
+
+/**
+ * get_next_path_rr() - Returns path in round-robin fashion.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_RR
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
+{
+ struct rtrs_clt_path __rcu **ppcpu_path;
+ struct rtrs_clt_path *path;
+ struct rtrs_clt_sess *clt;
+
+ clt = it->clt;
+
+ /*
+ * Here we use two RCU objects: @paths_list and @pcpu_path
+ * pointer. See rtrs_clt_remove_path_from_arr() for details
+ * how that is handled.
+ */
+
+ ppcpu_path = this_cpu_ptr(clt->pcpu_path);
+ path = rcu_dereference(*ppcpu_path);
+ if (!path)
+ path = list_first_or_null_rcu(&clt->paths_list,
+ typeof(*path), s.entry);
+ else
+ path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
+
+ rcu_assign_pointer(*ppcpu_path, path);
+
+ return path;
+}
+
+/**
+ * get_next_path_min_inflight() - Returns path with minimal inflight count.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_MIN_INFLIGHT
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
+{
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ int min_inflight = INT_MAX;
+ int inflight;
+
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
+ continue;
+
+ inflight = atomic_read(&clt_path->stats->inflight);
+
+ if (inflight < min_inflight) {
+ min_inflight = inflight;
+ min_path = clt_path;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+/**
+ * get_next_path_min_latency() - Returns path with minimal latency.
+ * @it: the path pointer
+ *
+ * Return: a path with the lowest latency or NULL if all paths are tried
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ *
+ * Related to @MP_POLICY_MIN_LATENCY
+ *
+ * This DOES skip already-tried paths: a skip-list records paths that
+ * have been tried but failed. The minimum-latency path is tried first,
+ * then the path with the second-lowest latency, and so on; NULL is
+ * returned once all paths have been tried. The caller therefore MUST
+ * check for a NULL return and trigger the IO error in that case.
+ */
+static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
+{
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ ktime_t min_latency = KTIME_MAX;
+ ktime_t latency;
+
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
+ continue;
+
+ latency = clt_path->s.hb_cur_latency;
+
+ if (latency < min_latency) {
+ min_latency = latency;
+ min_path = clt_path;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
+{
+ INIT_LIST_HEAD(&it->skip_list);
+ it->clt = clt;
+ it->i = 0;
+
+ if (clt->mp_policy == MP_POLICY_RR)
+ it->next_path = get_next_path_rr;
+ else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ it->next_path = get_next_path_min_inflight;
+ else
+ it->next_path = get_next_path_min_latency;
+}
+
+static inline void path_it_deinit(struct path_it *it)
+{
+ struct list_head *skip, *tmp;
+ /*
+ * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
+ * We need to remove paths from it, so that next IO can insert
+ * paths (->mp_skip_entry) into a skip_list again.
+ */
+ list_for_each_safe(skip, tmp, &it->skip_list)
+ list_del_init(skip);
+}
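+
+/*
+ * Typical iterator usage, mirroring rtrs_clt_failover_req() below:
+ *
+ *	rcu_read_lock();
+ *	for (path_it_init(&it, clt);
+ *	     (path = it.next_path(&it)) && it.i < it.clt->paths_num;
+ *	     it.i++) {
+ *		... try the path ...
+ *	}
+ *	path_it_deinit(&it);
+ *	rcu_read_unlock();
+ */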
+
+/**
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
+ * about an inflight IO.
+ * The user buffer holding user control message (not data) is copied into
+ * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
+ * also hold the control message of rtrs.
+ * @req: an io request holding information about IO.
+ * @clt_path: client path
+ * @conf: confirmation callback function to notify the upper layer.
+ * @permit: permit for allocation of RDMA remote buffer
+ * @priv: private pointer
+ * @vec: kernel vector containing control message
+ * @usr_len: length of the user message
+ * @sg: scatter list for IO data
+ * @sg_cnt: number of scatter list entries
+ * @data_len: length of the IO data
+ * @dir: direction of the IO.
+ */
+static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ struct rtrs_clt_path *clt_path,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct iov_iter iter;
+ size_t len;
+
+ req->permit = permit;
+ req->in_use = true;
+ req->usr_len = usr_len;
+ req->data_len = data_len;
+ req->sglist = sg;
+ req->sg_cnt = sg_cnt;
+ req->priv = priv;
+ req->dir = dir;
+ req->con = rtrs_permit_to_clt_con(clt_path, permit);
+ req->conf = conf;
+ req->need_inv = false;
+ req->need_inv_comp = false;
+ req->inv_errno = 0;
+ refcount_set(&req->ref, 1);
+ req->mp_policy = clt_path->clt->mp_policy;
+
+ iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
+ len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ WARN_ON(len != usr_len);
+
+ reinit_completion(&req->inv_comp);
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct rtrs_clt_io_req *req;
+
+ req = &clt_path->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
+ sg, sg_cnt, data_len, dir);
+ return req;
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_io_req *req;
+ struct kvec vec = {
+ .iov_base = fail_req->iu->buf,
+ .iov_len = fail_req->usr_len
+ };
+
+ req = &alive_path->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
+ fail_req->priv, &vec, fail_req->usr_len,
+ fail_req->sglist, fail_req->sg_cnt,
+ fail_req->data_len, fail_req->dir);
+ return req;
+}
+
+static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, bool fr_en,
+ u32 count, u32 size, u32 imm,
+ struct ib_send_wr *wr,
+ struct ib_send_wr *tail)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct ib_sge *sge = req->sge;
+ enum ib_send_flags flags;
+ struct scatterlist *sg;
+ size_t num_sge;
+ int i;
+ struct ib_send_wr *ptail = NULL;
+
+ if (fr_en) {
+ i = 0;
+ sge[i].addr = req->mr->iova;
+ sge[i].length = req->mr->length;
+ sge[i].lkey = req->mr->lkey;
+ i++;
+ num_sge = 2;
+ ptail = tail;
+ } else {
+ for_each_sg(req->sglist, sg, count, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+ }
+ num_sge = 1 + count;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
+ size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
+ rbuf->rkey, rbuf->addr, imm,
+ flags, wr, ptail);
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (nr < req->sg_cnt)
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
+}
+
+static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ struct rtrs_msg_rdma_write *msg;
+
+ struct rtrs_rbuf *rbuf;
+ int ret, count = 0;
+ u32 imm, buf_id;
+ struct ib_reg_wr rwr;
+ struct ib_send_wr inv_wr;
+ struct ib_send_wr *wr = NULL;
+ bool fr_en = false;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (tsize > clt_path->chunk_size) {
+ rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
+ tsize, clt_path->chunk_size);
+ return -EMSGSIZE;
+ }
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ if (!count) {
+ rtrs_wrn(s, "Write request failed, map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put rtrs msg after sg and user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_WRITE);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ /* rtrs message on server side will be after user data and message */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+ req->sg_size = tsize;
+ rbuf = &clt_path->rbufs[buf_id];
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Write request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ return ret;
+ }
+ inv_wr = (struct ib_send_wr) {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE),
+ };
+ wr = &rwr.wr;
+ fr_en = true;
+ refcount_inc(&req->ref);
+ }
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
+ req->usr_len + sizeof(*msg),
+ imm, wr, &inv_wr);
+ if (ret) {
+ rtrs_err_rl(s,
+ "Write request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&clt_path->stats->inflight);
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ struct rtrs_msg_rdma_read *msg;
+ struct rtrs_ib_dev *dev = clt_path->s.dev;
+
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (tsize > clt_path->chunk_size) {
+ rtrs_wrn(s,
+ "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
+ tsize, clt_path->chunk_size);
+ return -EMSGSIZE;
+ }
+
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ if (!count) {
+ rtrs_wrn(s,
+ "Read request failed, dma map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put our message into req->buf after user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_READ);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Read request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ };
+ wr = &rwr.wr;
+
+ msg->sg_cnt = cpu_to_le16(1);
+ msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
+
+ msg->desc[0].addr = cpu_to_le64(req->mr->iova);
+ msg->desc[0].key = cpu_to_le32(req->mr->rkey);
+ msg->desc[0].len = cpu_to_le32(req->mr->length);
+
+ /* Further invalidation is required */
+ req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+
+ } else {
+ msg->sg_cnt = 0;
+ msg->flags = 0;
+ }
+ /*
+ * rtrs message will be after the space reserved for disk data and
+ * user message
+ */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+
+ req->sg_size = sizeof(*msg);
+ req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
+ req->sg_size += req->usr_len;
+
+ /*
+ * Update stats now, after request is successfully sent it is not
+ * safe anymore to touch it.
+ */
+ rtrs_clt_update_all_stats(req, READ);
+
+ ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
+ req->data_len, imm, wr);
+ if (ret) {
+ rtrs_err_rl(s,
+ "Read request failed: error=%d path=%s [%s:%u]\n",
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
+ if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&clt_path->stats->inflight);
+ req->need_inv = false;
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+/**
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
+ * @clt: clt context
+ * @fail_req: a failed io request.
+ */
+static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_path *alive_path;
+ struct rtrs_clt_io_req *req;
+ int err = -ECONNABORTED;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
+ it.i++) {
+ if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+ req = rtrs_clt_get_copy_req(alive_path, fail_req);
+ if (req->dir == DMA_TO_DEVICE)
+ err = rtrs_clt_write_req(req);
+ else
+ err = rtrs_clt_read_req(req);
+ if (err) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ rtrs_clt_inc_failover_cnt(alive_path->stats);
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_io_req *req;
+ int i, err;
+
+ if (!clt_path->reqs)
+ return;
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ if (!req->in_use)
+ continue;
+
+ /*
+ * Safely (without notification) complete the failed request.
+ * After completion this request is still usable and can
+ * be failed over to another path.
+ */
+ complete_rdma_req(req, -ECONNABORTED, false, true);
+
+ err = rtrs_clt_failover_req(clt, req);
+ if (err)
+ /* Failover failed, notify anyway */
+ req->conf(req->priv, err);
+ }
+}
+
+static void free_path_reqs(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_io_req *req;
+ int i;
+
+ if (!clt_path->reqs)
+ return;
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+ kfree(req->sge);
+ rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
+ }
+ kfree(clt_path->reqs);
+ clt_path->reqs = NULL;
+}
+
+static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_io_req *req;
+ int i, err = -ENOMEM;
+
+ clt_path->reqs = kcalloc(clt_path->queue_depth,
+ sizeof(*clt_path->reqs),
+ GFP_KERNEL);
+ if (!clt_path->reqs)
+ return -ENOMEM;
+
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
+ DMA_TO_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!req->iu)
+ goto out;
+
+ req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
+ if (!req->sge)
+ goto out;
+
+ req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
+ IB_MR_TYPE_MEM_REG,
+ clt_path->max_pages_per_mr);
+ if (IS_ERR(req->mr)) {
+ err = PTR_ERR(req->mr);
+ req->mr = NULL;
+ pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
+ clt_path->max_pages_per_mr);
+ goto out;
+ }
+
+ init_completion(&req->inv_comp);
+ }
+
+ return 0;
+
+out:
+ free_path_reqs(clt_path);
+
+ return err;
+}
+
+static int alloc_permits(struct rtrs_clt_sess *clt)
+{
+ unsigned int chunk_bits;
+ int err, i;
+
+ clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
+ if (!clt->permits_map) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
+ if (!clt->permits) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+ chunk_bits = ilog2(clt->queue_depth - 1) + 1;
+ for (i = 0; i < clt->queue_depth; i++) {
+ struct rtrs_permit *permit;
+
+ permit = get_permit(clt, i);
+ permit->mem_id = i;
+ permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
+ }
+
+ return 0;
+
+err_map:
+ bitmap_free(clt->permits_map);
+ clt->permits_map = NULL;
+out_err:
+ return err;
+}
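+
+/*
+ * Worked example: for queue_depth == 128, chunk_bits == ilog2(127) + 1 == 7,
+ * so permit i gets mem_off == i << (MAX_IMM_PAYL_BITS - 7). The permit id
+ * thus lands in the top bits of the immediate payload, leaving the low bits
+ * free for a byte offset within the chunk (see the imm computation in
+ * rtrs_clt_write_req()/rtrs_clt_read_req()).
+ */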
+
+static void free_permits(struct rtrs_clt_sess *clt)
+{
+ if (clt->permits_map)
+ wait_event(clt->permits_wait,
+ bitmap_empty(clt->permits_map, clt->queue_depth));
+
+ bitmap_free(clt->permits_map);
+ clt->permits_map = NULL;
+ kfree(clt->permits);
+ clt->permits = NULL;
+}
+
+static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
+{
+ struct ib_device *ib_dev;
+ u64 max_pages_per_mr;
+ int mr_page_shift;
+
+ ib_dev = clt_path->s.dev->ib_dev;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
+ max_pages_per_mr = ib_dev->attrs.max_mr_size;
+ do_div(max_pages_per_mr, (1ull << mr_page_shift));
+ clt_path->max_pages_per_mr =
+ min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
+ ib_dev->attrs.max_fast_reg_page_list_len);
+ clt_path->clt->max_segments =
+ min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
+}
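+
+/*
+ * Worked example: for an HCA with page_size_cap == 0x1000 (4K smallest
+ * page), mr_page_shift == 12; with max_mr_size == 4G that allows 1M pages
+ * per MR, so max_pages_per_mr stays clamped by its initial value of
+ * RTRS_MAX_SEGMENTS (128, set in alloc_path()) and by
+ * max_fast_reg_page_list_len.
+ */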
+
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
+ enum rtrs_clt_state new_state,
+ enum rtrs_clt_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (old_state)
+ *old_state = clt_path->state;
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
+{
+ struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
+{
+ rtrs_init_hb(&clt_path->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_clt_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work);
+static void rtrs_clt_close_work(struct work_struct *work);
+
+static void rtrs_clt_err_recovery_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
+ int delay_ms;
+
+ clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
+ clt = clt_path->clt;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
+ msecs_to_jiffies(delay_ms +
+ prandom_u32_max(RTRS_RECONNECT_SEED)));
+}
+
+static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
+ const struct rtrs_addr *path,
+ size_t con_num, u32 nr_poll_queues)
+{
+ struct rtrs_clt_path *clt_path;
+ int err = -ENOMEM;
+ int cpu;
+ size_t total_con;
+
+ clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
+ if (!clt_path)
+ goto err;
+
+ /*
+ * con_num irq-mode connections plus nr_poll_queues polling
+ * connections, +1: extra connection for user messages
+ */
+ total_con = con_num + nr_poll_queues + 1;
+ clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
+ GFP_KERNEL);
+ if (!clt_path->s.con)
+ goto err_free_path;
+
+ clt_path->s.con_num = total_con;
+ clt_path->s.irq_con_num = con_num + 1;
+
+ clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
+ if (!clt_path->stats)
+ goto err_free_con;
+
+ mutex_init(&clt_path->init_mutex);
+ uuid_gen(&clt_path->s.uuid);
+ memcpy(&clt_path->s.dst_addr, path->dst,
+ rdma_addr_size((struct sockaddr *)path->dst));
+
+ /*
+ * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
+ * checks the sa_family to be non-zero. If user passed src_addr=NULL
+ * the sess->src_addr will contain only zeros, which is then fine.
+ */
+ if (path->src)
+ memcpy(&clt_path->s.src_addr, path->src,
+ rdma_addr_size((struct sockaddr *)path->src));
+ strscpy(clt_path->s.sessname, clt->sessname,
+ sizeof(clt_path->s.sessname));
+ clt_path->clt = clt;
+ clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
+ init_waitqueue_head(&clt_path->state_wq);
+ clt_path->state = RTRS_CLT_CONNECTING;
+ atomic_set(&clt_path->connected_cnt, 0);
+ INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
+ INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
+ INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(clt_path);
+
+ clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
+ if (!clt_path->mp_skip_entry)
+ goto err_free_stats;
+
+ for_each_possible_cpu(cpu)
+ INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
+
+ err = rtrs_clt_init_stats(clt_path->stats);
+ if (err)
+ goto err_free_percpu;
+
+ return clt_path;
+
+err_free_percpu:
+ free_percpu(clt_path->mp_skip_entry);
+err_free_stats:
+ kfree(clt_path->stats);
+err_free_con:
+ kfree(clt_path->s.con);
+err_free_path:
+ kfree(clt_path);
+err:
+ return ERR_PTR(err);
+}
+
+void free_path(struct rtrs_clt_path *clt_path)
+{
+ free_percpu(clt_path->mp_skip_entry);
+ mutex_destroy(&clt_path->init_mutex);
+ kfree(clt_path->s.con);
+ kfree(clt_path->rbufs);
+ kfree(clt_path);
+}
+
+static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
+{
+ struct rtrs_clt_con *con;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ /* Map first two connections to the first CPU */
+ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
+ con->c.cid = cid;
+ con->c.path = &clt_path->s;
+ /* Align with srv, init as 1 */
+ atomic_set(&con->c.wr_cnt, 1);
+ mutex_init(&con->con_mutex);
+
+ clt_path->s.con[cid] = &con->c;
+
+ return 0;
+}
+
+static void destroy_con(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ clt_path->s.con[con->c.cid] = NULL;
+ mutex_destroy(&con->con_mutex);
+ kfree(con);
+}
+
+static int create_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+ lockdep_assert_held(&con->con_mutex);
+ if (con->c.cid == 0) {
+ max_send_sge = 1;
+ /* We must be the first here */
+ if (WARN_ON(clt_path->s.dev))
+ return -EINVAL;
+
+ /*
+ * The whole session uses the device of the user connection.
+ * Be careful not to close the user connection before the
+ * ib dev reference is gracefully put.
+ */
+ clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ &dev_pd);
+ if (!clt_path->s.dev) {
+ rtrs_wrn(clt_path->clt,
+ "rtrs_ib_dev_find_get_or_add(): no memory\n");
+ return -ENOMEM;
+ }
+ clt_path->s.dev_ref = 1;
+ query_fast_reg_mode(clt_path);
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
+ /*
+ * Two completions (request + registration) per send,
+ * two per recv if always_invalidate is set on the server,
+ * or one per recv otherwise,
+ * + 2 for drain and heartbeat,
+ * in case the qp gets into an error state.
+ */
+ max_send_wr =
+ min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+ max_recv_wr = max_send_wr;
+ } else {
+ /*
+ * Here we assume that session members are correctly set.
+ * This is always true if user connection (cid == 0) is
+ * established first.
+ */
+ if (WARN_ON(!clt_path->s.dev))
+ return -EINVAL;
+ if (WARN_ON(!clt_path->queue_depth))
+ return -EINVAL;
+
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
+ /* Shared between connections */
+ clt_path->s.dev_ref++;
+ max_send_wr = min_t(int, wr_limit,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ clt_path->queue_depth * 4 + 1);
+ max_recv_wr = min_t(int, wr_limit,
+ clt_path->queue_depth * 3 + 1);
+ max_send_sge = 2;
+ }
+ atomic_set(&con->c.sq_wr_avail, max_send_wr);
+ cq_num = max_send_wr + max_recv_wr;
+ /* alloc iu to recv new rkey reply when server reports flags set */
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
+ GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+ con->queue_num = cq_num;
+ }
+ cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
+ if (con->c.cid >= clt_path->s.irq_con_num)
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
+ max_recv_wr, IB_POLL_DIRECT);
+ else
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
+ max_recv_wr, IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+ */
+ return err;
+}
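+
+/*
+ * WR budget example: for an IO connection with queue_depth == 128 the
+ * above gives max_send_wr == min(wr_limit, 128 * 4 + 1) == 513 and
+ * max_recv_wr == min(wr_limit, 128 * 3 + 1) == 385, so the CQ is sized
+ * for 898 completions; the user connection (cid == 0) is instead sized
+ * from SERVICE_CON_QUEUE_DEPTH.
+ */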
+
+static void destroy_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ /*
+ * Be careful here: destroy_con_cq_qp() can be called even
+ * create_con_cq_qp() failed, see comments there.
+ */
+ lockdep_assert_held(&con->con_mutex);
+ rtrs_cq_qp_destroy(&con->c);
+ if (con->rsp_ius) {
+ rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
+ con->queue_num);
+ con->rsp_ius = NULL;
+ con->queue_num = 0;
+ }
+ if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
+ rtrs_ib_dev_put(clt_path->s.dev);
+ clt_path->s.dev = NULL;
+ }
+}
+
+static void stop_cm(struct rtrs_clt_con *con)
+{
+ rdma_disconnect(con->c.cm_id);
+ if (con->c.qp)
+ ib_drain_qp(con->c.qp);
+}
+
+static void destroy_cm(struct rtrs_clt_con *con)
+{
+ rdma_destroy_id(con->c.cm_id);
+ con->c.cm_id = NULL;
+}
+
+static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_path *s = con->c.path;
+ int err;
+
+ mutex_lock(&con->con_mutex);
+ err = create_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ if (err) {
+ rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
+ if (err)
+ rtrs_err(s, "Resolving route failed, err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_msg_conn_req msg;
+ struct rdma_conn_param param;
+
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_req) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .cid = cpu_to_le16(con->c.cid),
+ .cid_num = cpu_to_le16(clt_path->s.con_num),
+ .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
+ };
+ msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
+ uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+ err = rdma_connect_locked(con->c.cm_id, &param);
+ if (err)
+ rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ const struct rtrs_msg_conn_rsp *msg;
+ u16 version, queue_depth;
+ int errno;
+ u8 len;
+
+ msg = ev->param.conn.private_data;
+ len = ev->param.conn.private_data_len;
+ if (len < sizeof(*msg)) {
+ rtrs_err(clt, "Invalid RTRS connection response\n");
+ return -ECONNRESET;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ rtrs_err(clt, "Invalid RTRS magic\n");
+ return -ECONNRESET;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ return -ECONNRESET;
+ }
+ errno = le16_to_cpu(msg->errno);
+ if (errno) {
+ rtrs_err(clt, "Invalid RTRS message: errno %d\n",
+ errno);
+ return -ECONNRESET;
+ }
+ if (con->c.cid == 0) {
+ queue_depth = le16_to_cpu(msg->queue_depth);
+
+ if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
+ rtrs_err(clt, "Error: queue depth changed\n");
+
+ /*
+ * Stop any more reconnection attempts
+ */
+ clt_path->reconnect_attempts = -1;
+ rtrs_err(clt,
+ "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
+ return -ECONNRESET;
+ }
+
+ if (!clt_path->rbufs) {
+ clt_path->rbufs = kcalloc(queue_depth,
+ sizeof(*clt_path->rbufs),
+ GFP_KERNEL);
+ if (!clt_path->rbufs)
+ return -ENOMEM;
+ }
+ clt_path->queue_depth = queue_depth;
+ clt_path->s.signal_interval = min_not_zero(queue_depth,
+ (unsigned short) SERVICE_CON_QUEUE_DEPTH);
+ clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
+ clt_path->flags = le32_to_cpu(msg->flags);
+ clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
+
+ /*
+		 * The global IO size is always a minimum.  If during a
+		 * reconnection the server sends us a slightly higher value,
+		 * the client does not care and keeps using the cached minimum.
+		 *
+		 * Since several sessions (paths) can re-establish their
+		 * connections in parallel, take the lock.
+ */
+ mutex_lock(&clt->paths_mutex);
+ clt->queue_depth = clt_path->queue_depth;
+ clt->max_io_size = min_not_zero(clt_path->max_io_size,
+ clt->max_io_size);
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+ * Cache the hca_port and hca_name for sysfs
+ */
+ clt_path->hca_port = con->c.cm_id->port_num;
+	scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
+		  "%s", clt_path->s.dev->ib_dev->name);
+ clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ /* set for_new_clt, to allow future reconnect on any path */
+ clt_path->for_new_clt = 1;
+ }
+
+ return 0;
+}
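
The major-version check above relies on the version word layout defined in rtrs-pri.h: the major version lives in the high byte and the minor in the low byte. A minimal sketch of that packing and the `version >> 8` extraction:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned short major = 2, minor = 0;	/* RTRS_PROTO_VER_MAJOR/MINOR */
	unsigned short ver = (major << 8) | minor;	/* == RTRS_PROTO_VER */

	assert((ver >> 8) == major);	/* the check in conn_established */
	assert((ver & 0xff) == minor);
	printf("version word: 0x%04x\n", (unsigned int)ver);
	return 0;
}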
+
+static inline void flag_success_on_conn(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+
+ atomic_inc(&clt_path->connected_cnt);
+ con->cm_err = 1;
+}
+
+static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_path *s = con->c.path;
+ const struct rtrs_msg_conn_rsp *msg;
+ const char *rej_msg;
+ int status, errno;
+ u8 data_len;
+
+ status = ev->status;
+ rej_msg = rdma_reject_msg(con->c.cm_id, status);
+ msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
+
+ if (msg && data_len >= sizeof(*msg)) {
+ errno = (int16_t)le16_to_cpu(msg->errno);
+ if (errno == -EBUSY)
+ rtrs_err(s,
+				 "Previous session still exists on the server, please reconnect later\n");
+ else
+ rtrs_err(s,
+ "Connect rejected: status %d (%s), rtrs errno %d\n",
+ status, rej_msg, errno);
+ } else {
+ rtrs_err(s,
+ "Connect rejected but with malformed message: status %d (%s)\n",
+ status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
+{
+ trace_rtrs_clt_close_conns(clt_path);
+
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
+ queue_work(rtrs_wq, &clt_path->close_work);
+ if (wait)
+ flush_work(&clt_path->close_work);
+}
+
+static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
+{
+ if (con->cm_err == 1) {
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = to_clt_path(con->c.path);
+		if (atomic_dec_and_test(&clt_path->connected_cnt))
+			wake_up(&clt_path->state_wq);
+ }
+ con->cm_err = cm_err;
+}
+
+static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_con *con = cm_id->context;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ int cm_err = 0;
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_err = rtrs_rdma_addr_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_err = rtrs_rdma_route_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ cm_err = rtrs_rdma_conn_established(con, ev);
+ if (!cm_err) {
+ /*
+ * Report success and wake up. Here we abuse state_wq,
+ * i.e. wake up without state change, but we set cm_err.
+ */
+ flag_success_on_conn(con);
+ wake_up(&clt_path->state_wq);
+ return 0;
+ }
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_err = rtrs_rdma_conn_rejected(con, ev);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ /* No message for disconnecting */
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ cm_err = -EHOSTUNREACH;
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+ * Device removal is a special case. Queue close and return 0.
+ */
+ rtrs_clt_close_conns(clt_path, false);
+ return 0;
+ default:
+ rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ cm_err = -ECONNRESET;
+ break;
+ }
+
+ if (cm_err) {
+ /*
+		 * A CM error makes sense only while the connection is being
+		 * established; in all other cases we rely on the normal
+		 * reconnect procedure.
+ */
+ flag_error_on_conn(con, cm_err);
+ rtrs_rdma_error_recovery(con);
+ }
+
+ return 0;
+}
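
Condensing the switch above: reachability failures (address and route errors) map to -EHOSTUNREACH, while events that break an already-negotiated exchange map to -ECONNRESET. A standalone sketch of that mapping (the enum below is a stand-in for the real rdma_cm event codes):

#include <errno.h>

enum cm_ev { EV_REJECTED, EV_DISCONNECTED, EV_ADDR_ERROR, EV_ROUTE_ERROR };

int cm_ev_to_err(enum cm_ev ev)
{
	switch (ev) {
	case EV_ADDR_ERROR:
	case EV_ROUTE_ERROR:
		return -EHOSTUNREACH;	/* could not reach the peer at all */
	default:
		return -ECONNRESET;	/* connection attempt broken mid-way */
	}
}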
+
+/* The caller should do the cleanup in case of error */
+static int create_cm(struct rtrs_clt_con *con)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
+ struct rdma_cm_id *cm_id;
+ int err;
+
+ cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
+ clt_path->s.dst_addr.ss_family == AF_IB ?
+ RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ err = PTR_ERR(cm_id);
+ rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
+
+ return err;
+ }
+ con->c.cm_id = cm_id;
+ con->cm_err = 0;
+ /* allow the port to be reused */
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ (struct sockaddr *)&clt_path->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+ return err;
+ }
+ /*
+	 * Combine connection status and session events. This is needed
+	 * to wait for one of two possible cases: cm_err holds something
+	 * meaningful, or the session state has really changed to error
+	 * by device removal.
+ */
+ err = wait_event_interruptible_timeout(
+ clt_path->state_wq,
+ con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+ if (err == 0 || err == -ERESTARTSYS) {
+ if (err == 0)
+ err = -ETIMEDOUT;
+ /* Timedout or interrupted */
+ return err;
+ }
+ if (con->cm_err < 0)
+ return con->cm_err;
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
+ /* Device removal */
+ return -ECONNABORTED;
+
+ return 0;
+}
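
create_cm() leans on the return convention of wait_event_interruptible_timeout(): 0 means the timeout expired, -ERESTARTSYS means a signal interrupted the wait, and a positive value (the remaining jiffies) means the condition became true in time. A small sketch of mapping that return into an error code, mirroring the branch above (ERESTARTSYS is kernel-internal, so it is defined locally here):

#include <errno.h>

#define ERESTARTSYS 512	/* kernel-internal errno, absent from userspace headers */

int wait_ret_to_err(long ret)
{
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out, condition never became true */
	if (ret == -ERESTARTSYS)
		return -ERESTARTSYS;	/* interrupted by a signal */
	return 0;			/* > 0: condition true with jiffies to spare */
}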
+
+static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ int up;
+
+ /*
+ * We can fire RECONNECTED event only when all paths were
+ * connected on rtrs_clt_open(), then each was disconnected
+ * and the first one connected again. That's why this nasty
+ * game with counter value.
+ */
+
+ mutex_lock(&clt->paths_ev_mutex);
+ up = ++clt->paths_up;
+ /*
+	 * Here it is safe to access paths_num directly since the up counter
+	 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
+ * in progress, thus paths removals are impossible.
+ */
+ if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
+ clt->paths_up = clt->paths_num;
+ else if (up == 1)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+
+ /* Mark session as established */
+ clt_path->established = true;
+ clt_path->reconnect_attempts = 0;
+ clt_path->stats->reconnects.successful_cnt++;
+}
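
The "nasty game" with the counter is easiest to see in isolation: starting at MAX_PATHS_NUM guarantees the initial connects can never produce up == 1, and only once all initial paths are up is the counter clamped down to the real path count, after which later reconnects behave normally. A runnable sketch (paths_num = 2 is an assumed example):

#include <stdio.h>

#define MAX_PATHS_NUM 128

int main(void)
{
	int paths_num = 2, paths_up = MAX_PATHS_NUM;

	for (int i = 0; i < paths_num; i++) {
		int up = ++paths_up;

		if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + paths_num)
			paths_up = paths_num;	/* initial open finished */
		else if (up == 1)
			printf("RECONNECTED event\n");	/* unreachable during open */
	}
	printf("paths_up settled at %d\n", paths_up);	/* prints 2 */
	return 0;
}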
+
+static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ if (!clt_path->established)
+ return;
+
+ clt_path->established = false;
+ mutex_lock(&clt->paths_ev_mutex);
+ WARN_ON(!clt->paths_up);
+ if (--clt->paths_up == 0)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_con *con;
+ unsigned int cid;
+
+ WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
+
+ /*
+ * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
+ * exactly in between. Start destroying after it finishes.
+ */
+ mutex_lock(&clt_path->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
+
+ /*
+ * All IO paths must observe !CONNECTED state before we
+ * free everything.
+ */
+ synchronize_rcu();
+
+ rtrs_stop_hb(&clt_path->s);
+
+ /*
+	 * The order is utterly crucial: first disconnect and complete all
+	 * rdma requests with error (thus setting in_use=false for requests),
+	 * then fail outstanding requests checking in_use for each, and
+	 * eventually notify the upper layer about session disconnection.
+ */
+
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
+ break;
+ con = to_clt_con(clt_path->s.con[cid]);
+ stop_cm(con);
+ }
+ fail_all_outstanding_reqs(clt_path);
+ free_path_reqs(clt_path);
+ rtrs_clt_path_down(clt_path);
+
+ /*
+	 * Wait for graceful shutdown, namely when the peer side invokes
+	 * rdma_disconnect(). 'connected_cnt' is decremented only on
+	 * CM events, thus if the other side has crashed and hb has detected
+	 * something is wrong, we will be stuck here for exactly timeout ms,
+	 * since CM fires nothing. That is fine, we are not in a
+	 * hurry.
+ */
+ wait_event_timeout(clt_path->state_wq,
+ !atomic_read(&clt_path->connected_cnt),
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
+ break;
+ con = to_clt_con(clt_path->s.con[cid]);
+ mutex_lock(&con->con_mutex);
+ destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+}
+
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_path *next;
+ bool wait_for_grace = false;
+ int cpu;
+
+ mutex_lock(&clt->paths_mutex);
+ list_del_rcu(&clt_path->s.entry);
+
+ /* Make sure everybody observes path removal. */
+ synchronize_rcu();
+
+ /*
+ * At this point nobody sees @sess in the list, but still we have
+ * dangling pointer @pcpu_path which _can_ point to @sess. Since
+ * nobody can observe @sess in the list, we guarantee that IO path
+ * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
+ * to @sess, but can never again become @sess.
+ */
+
+ /*
+	 * Decrement the number of paths only after the grace period, because
+	 * the caller of do_each_path() must first observe the list without
+	 * the path and only then the decremented number of paths.
+ *
+ * Otherwise there can be the following situation:
+ * o Two paths exist and IO is coming.
+ * o One path is removed:
+ * CPU#0 CPU#1
+ * do_each_path(): rtrs_clt_remove_path_from_arr():
+ * path = get_next_path()
+ * ^^^ list_del_rcu(path)
+ * [!CONNECTED path] clt->paths_num--
+ * ^^^^^^^^^
+ * load clt->paths_num from 2 to 1
+ * ^^^^^^^^^
+ * sees 1
+ *
+ * path is observed as !CONNECTED, but do_each_path() loop
+ * ends, because expression i < clt->paths_num is false.
+ */
+ clt->paths_num--;
+
+ /*
+ * Get @next connection from current @sess which is going to be
+ * removed. If @sess is the last element, then @next is NULL.
+ */
+ rcu_read_lock();
+ next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
+ rcu_read_unlock();
+
+ /*
+ * @pcpu paths can still point to the path which is going to be
+ * removed, so change the pointer manually.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rtrs_clt_path __rcu **ppcpu_path;
+
+ ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
+ if (rcu_dereference_protected(*ppcpu_path,
+ lockdep_is_held(&clt->paths_mutex)) != clt_path)
+ /*
+ * synchronize_rcu() was called just after deleting
+ * entry from the list, thus IO code path cannot
+ * change pointer back to the pointer which is going
+ * to be removed, we are safe here.
+ */
+ continue;
+
+ /*
+ * We race with IO code path, which also changes pointer,
+ * thus we have to be careful not to overwrite it.
+ */
+ if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
+ next))
+ /*
+ * @ppcpu_path was successfully replaced with @next,
+			 * which means that someone could also have picked up
+			 * @sess and be dereferencing it right now, so waiting
+			 * for a grace period is required.
+ */
+ wait_for_grace = true;
+ }
+ if (wait_for_grace)
+ synchronize_rcu();
+
+ mutex_unlock(&clt->paths_mutex);
+}
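
The per-CPU cache swap above is a compare-and-exchange on a shared pointer: replace it only if it still points at the dying path, so that a concurrent update from the IO side wins. A minimal C11-atomics sketch of the same idea, with atomic_compare_exchange_strong() standing in for the kernel's try_cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

struct path { int id; };

static _Atomic(struct path *) pcpu_cache;

static int clear_if_matches(struct path *dying, struct path *next)
{
	struct path *expected = dying;

	/* fails (and refreshes expected) if IO already re-pointed the cache */
	return atomic_compare_exchange_strong(&pcpu_cache, &expected, next);
}

int main(void)
{
	struct path a = { 1 }, b = { 2 };

	atomic_store(&pcpu_cache, &a);
	printf("cleared: %d\n", clear_if_matches(&a, &b));	/* 1: cache now &b */
	printf("cleared: %d\n", clear_if_matches(&a, NULL));	/* 0: no longer &a */
	return 0;
}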
+
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ mutex_lock(&clt->paths_mutex);
+ clt->paths_num++;
+
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_close_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = container_of(work, struct rtrs_clt_path, close_work);
+
+ cancel_work_sync(&clt_path->err_recovery_work);
+ cancel_delayed_work_sync(&clt_path->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
+}
+
+static int init_conns(struct rtrs_clt_path *clt_path)
+{
+ unsigned int cid;
+ int err, i;
+
+ /*
+	 * On every new session establishment increase the reconnect counter
+	 * to avoid clashes with previous sessions that are not yet closed
+	 * on the server side.
+ */
+ clt_path->s.recon_cnt++;
+
+ /* Establish all RDMA connections */
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ err = create_con(clt_path, cid);
+ if (err)
+ goto destroy;
+
+ err = create_cm(to_clt_con(clt_path->s.con[cid]));
+ if (err)
+ goto destroy;
+ }
+ err = alloc_path_reqs(clt_path);
+ if (err)
+ goto destroy;
+
+ return 0;
+
+destroy:
+	/* Make sure we do the cleanup in the order the connections were created */
+ for (i = 0; i <= cid; i++) {
+ struct rtrs_clt_con *con;
+
+ if (!clt_path->s.con[i])
+ break;
+
+ con = to_clt_con(clt_path->s.con[i]);
+ if (con->c.cm_id) {
+ stop_cm(con);
+ mutex_lock(&con->con_mutex);
+ destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ destroy_cm(con);
+ }
+ destroy_con(con);
+ }
+ /*
+	 * If we have never taken the async path and got an error, say,
+	 * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
+	 * manually to keep reconnecting.
+ */
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
+
+ return err;
+}
+
+static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
+ return;
+ }
+
+ rtrs_clt_update_wc_stats(con);
+}
+
+static int process_info_rsp(struct rtrs_clt_path *clt_path,
+ const struct rtrs_msg_info_rsp *msg)
+{
+ unsigned int sg_cnt, total_len;
+ int i, sgi;
+
+ sg_cnt = le16_to_cpu(msg->sg_cnt);
+ if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
+ rtrs_err(clt_path->clt,
+			 "Incorrect sg_cnt %d, queue depth is not a multiple of it\n",
+ sg_cnt);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and
+ * the offset inside the memory chunk.
+ */
+ if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS) {
+ rtrs_err(clt_path->clt,
+ "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
+ MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
+ return -EINVAL;
+ }
+ total_len = 0;
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
+ const struct rtrs_sg_desc *desc = &msg->desc[sgi];
+ u32 len, rkey;
+ u64 addr;
+
+ addr = le64_to_cpu(desc->addr);
+ rkey = le32_to_cpu(desc->key);
+ len = le32_to_cpu(desc->len);
+
+ total_len += len;
+
+ if (!len || (len % clt_path->chunk_size)) {
+ rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
+ sgi,
+ len);
+ return -EINVAL;
+ }
+ for ( ; len && i < clt_path->queue_depth; i++) {
+ clt_path->rbufs[i].addr = addr;
+ clt_path->rbufs[i].rkey = rkey;
+
+ len -= clt_path->chunk_size;
+ addr += clt_path->chunk_size;
+ }
+ }
+ /* Sanity check */
+ if (sgi != sg_cnt || i != clt_path->queue_depth) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg vector, not fully mapped\n");
+ return -EINVAL;
+ }
+ if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
+ rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
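
The ilog2 test above counts the bits needed to address one of sg_cnt buffers plus the bits needed for an offset inside a chunk, and requires the sum to fit in the 28 immediate-payload bits. A standalone sketch of the arithmetic (sg_cnt = 512 and chunk_size = 64K are assumed example values):

#include <assert.h>
#include <stdio.h>

#define MAX_IMM_PAYL_BITS 28

static int bits_for(unsigned int v)	/* == ilog2(v - 1) + 1 in the kernel */
{
	int b = 0;

	while ((1u << b) < v)
		b++;
	return b;
}

int main(void)
{
	unsigned int sg_cnt = 512, chunk_size = 65536;
	int need = bits_for(sg_cnt) + bits_for(chunk_size);

	printf("need %d of %d bits\n", need, MAX_IMM_PAYL_BITS);	/* 9 + 16 = 25 */
	assert(need <= MAX_IMM_PAYL_BITS);
	return 0;
}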
+
+static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_msg_info_rsp *msg;
+ enum rtrs_clt_state state;
+ struct rtrs_iu *iu;
+ size_t rx_sz;
+ int err;
+
+ state = RTRS_CLT_CONNECTING_ERR;
+
+ WARN_ON(con->c.cid);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto out;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (wc->byte_len < sizeof(*msg)) {
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
+ rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ rx_sz = sizeof(*msg);
+ rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
+ if (wc->byte_len < rx_sz) {
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ err = process_info_rsp(clt_path, msg);
+ if (err)
+ goto out;
+
+ err = post_recv_path(clt_path);
+ if (err)
+ goto out;
+
+ state = RTRS_CLT_CONNECTED;
+
+out:
+ rtrs_clt_update_wc_stats(con);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+ rtrs_clt_change_state_get_old(clt_path, state, NULL);
+}
+
+static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
+{
+ struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *tx_iu, *rx_iu;
+ size_t rx_sz;
+ int err;
+
+ rx_sz = sizeof(struct rtrs_msg_info_rsp);
+ rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
+
+ tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
+ clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
+ rtrs_clt_info_req_done);
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
+ if (!tx_iu || !rx_iu) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
+ if (err) {
+ rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ goto out;
+ }
+ rx_iu = NULL;
+
+ msg = tx_iu->buf;
+ msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
+ memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
+
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info request */
+ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
+ if (err) {
+ rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ goto out;
+ }
+ tx_iu = NULL;
+
+ /* Wait for state change */
+ wait_event_interruptible_timeout(clt_path->state_wq,
+ clt_path->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(
+ RTRS_CONNECT_TIMEOUT_MS));
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
+ err = -ECONNRESET;
+ else
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (tx_iu)
+ rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
+ if (rx_iu)
+ rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
+ if (err)
+		/* If we never took the async path, e.g. because of allocation problems */
+ rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_CONNECTING_ERR, NULL);
+
+ return err;
+}
+
+/**
+ * init_path() - establishes all path connections and does the handshake
+ * @clt_path: client path.
+ *
+ * In case of error the full close or reconnect procedure should be taken,
+ * because the async reconnect or close works may already have been started.
+ */
+static int init_path(struct rtrs_clt_path *clt_path)
+{
+ int err;
+ char str[NAME_MAX];
+ struct rtrs_addr path = {
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
+
+ mutex_lock(&clt_path->init_mutex);
+ err = init_conns(clt_path);
+ if (err) {
+ rtrs_err(clt_path->clt,
+ "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
+ str, clt_path->hca_name, clt_path->hca_port);
+ goto out;
+ }
+ err = rtrs_send_path_info(clt_path);
+ if (err) {
+ rtrs_err(clt_path->clt,
+ "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, clt_path->hca_name, clt_path->hca_port);
+ goto out;
+ }
+ rtrs_clt_path_up(clt_path);
+ rtrs_start_hb(&clt_path->s);
+out:
+ mutex_unlock(&clt_path->init_mutex);
+
+ return err;
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work)
+{
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
+ int err;
+
+ clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
+ reconnect_dwork);
+ clt = clt_path->clt;
+
+ trace_rtrs_clt_reconnect_work(clt_path);
+
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
+ return;
+
+ if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
+		/* Close the path completely if the max number of attempts is reached */
+ rtrs_clt_close_conns(clt_path, false);
+ return;
+ }
+ clt_path->reconnect_attempts++;
+
+ msleep(RTRS_RECONNECT_BACKOFF);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
+ err = init_path(clt_path);
+ if (err)
+ goto reconnect_again;
+ }
+
+ return;
+
+reconnect_again:
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
+ clt_path->stats->reconnects.fail_cnt++;
+ queue_work(rtrs_wq, &clt_path->err_recovery_work);
+ }
+}
+
+static void rtrs_clt_dev_release(struct device *dev)
+{
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
+
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
+ kfree(clt);
+}
+
+static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
+ u16 port, size_t pdu_sz, void *priv,
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev),
+ unsigned int reconnect_delay_sec,
+ unsigned int max_reconnect_attempts)
+{
+ struct rtrs_clt_sess *clt;
+ int err;
+
+ if (!paths_num || paths_num > MAX_PATHS_NUM)
+ return ERR_PTR(-EINVAL);
+
+ if (strlen(sessname) >= sizeof(clt->sessname))
+ return ERR_PTR(-EINVAL);
+
+ clt = kzalloc(sizeof(*clt), GFP_KERNEL);
+ if (!clt)
+ return ERR_PTR(-ENOMEM);
+
+ clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
+ if (!clt->pcpu_path) {
+ kfree(clt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ uuid_gen(&clt->paths_uuid);
+ INIT_LIST_HEAD_RCU(&clt->paths_list);
+ clt->paths_num = paths_num;
+ clt->paths_up = MAX_PATHS_NUM;
+ clt->port = port;
+ clt->pdu_sz = pdu_sz;
+ clt->max_segments = RTRS_MAX_SEGMENTS;
+ clt->reconnect_delay_sec = reconnect_delay_sec;
+ clt->max_reconnect_attempts = max_reconnect_attempts;
+ clt->priv = priv;
+ clt->link_ev = link_ev;
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ strscpy(clt->sessname, sessname, sizeof(clt->sessname));
+ init_waitqueue_head(&clt->permits_wait);
+ mutex_init(&clt->paths_ev_mutex);
+ mutex_init(&clt->paths_mutex);
+ device_initialize(&clt->dev);
+
+ err = dev_set_name(&clt->dev, "%s", sessname);
+ if (err)
+ goto err_put;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_add(&clt->dev);
+ if (err)
+ goto err_put;
+
+ clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ if (!clt->kobj_paths) {
+ err = -ENOMEM;
+ goto err_del;
+ }
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ goto err_del;
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
+err_del:
+ device_del(&clt->dev);
+err_put:
+ free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+ return ERR_PTR(err);
+}
+
+static void free_clt(struct rtrs_clt_sess *clt)
+{
+ free_percpu(clt->pcpu_path);
+
+ /*
+	 * The release callback will free clt and destroy the mutexes on the last put.
+ */
+ device_unregister(&clt->dev);
+}
+
+/**
+ * rtrs_clt_open() - Open a path to an RTRS server
+ * @ops: holds the link event callback and the private pointer.
+ * @pathname: name of the path to an RTRS server
+ * @paths: Paths to be established defined by their src and dst addresses
+ * @paths_num: Number of elements in the @paths array
+ * @port: port to be used by the RTRS session
+ * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
+ * @reconnect_delay_sec: time between reconnect tries
+ * @max_reconnect_attempts: Number of times to reconnect on error before giving
+ *			  up, 0 for disabled, -1 for forever
+ * @nr_poll_queues: number of polling-mode connections using the IB_POLL_DIRECT flag
+ *
+ * Starts session establishment with the rtrs_server. The function can block
+ * up to ~2000ms before it returns.
+ *
+ * Return: a valid pointer on success, otherwise a PTR_ERR()-encoded error.
+ */
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
+ const struct rtrs_addr *paths,
+ size_t paths_num, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ s16 max_reconnect_attempts, u32 nr_poll_queues)
+{
+ struct rtrs_clt_path *clt_path, *tmp;
+ struct rtrs_clt_sess *clt;
+ int err, i;
+
+ if (strchr(pathname, '/') || strchr(pathname, '.')) {
+		pr_err("pathname cannot contain '/' or '.'\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
+ ops->link_ev,
+ reconnect_delay_sec,
+ max_reconnect_attempts);
+ if (IS_ERR(clt)) {
+ err = PTR_ERR(clt);
+ goto out;
+ }
+ for (i = 0; i < paths_num; i++) {
+ struct rtrs_clt_path *clt_path;
+
+ clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
+ nr_poll_queues);
+ if (IS_ERR(clt_path)) {
+ err = PTR_ERR(clt_path);
+ goto close_all_path;
+ }
+ if (!i)
+ clt_path->for_new_clt = 1;
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
+
+ err = init_path(clt_path);
+ if (err) {
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
+ }
+
+ err = rtrs_clt_create_path_files(clt_path);
+ if (err) {
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
+ }
+ }
+ err = alloc_permits(clt);
+ if (err)
+ goto close_all_path;
+
+ return clt;
+
+close_all_path:
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ rtrs_clt_close_conns(clt_path, true);
+ kobject_put(&clt_path->kobj);
+ }
+ rtrs_clt_destroy_sysfs_root(clt);
+ free_clt(clt);
+
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(rtrs_clt_open);
+
+/**
+ * rtrs_clt_close() - Close a session
+ * @clt: Session handle. Session is freed upon return.
+ */
+void rtrs_clt_close(struct rtrs_clt_sess *clt)
+{
+ struct rtrs_clt_path *clt_path, *tmp;
+
+ /* Firstly forbid sysfs access */
+ rtrs_clt_destroy_sysfs_root(clt);
+
+ /* Now it is safe to iterate over all paths without locks */
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_close_conns(clt_path, true);
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ kobject_put(&clt_path->kobj);
+ }
+ free_permits(clt);
+ free_clt(clt);
+}
+EXPORT_SYMBOL(rtrs_clt_close);
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
+{
+ enum rtrs_clt_state old_state;
+ int err = -EBUSY;
+ bool changed;
+
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_RECONNECTING,
+ &old_state);
+ if (changed) {
+ clt_path->reconnect_attempts = 0;
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
+ }
+ if (changed || old_state == RTRS_CLT_RECONNECTING) {
+ /*
+ * flush_delayed_work() queues pending work for immediate
+ * execution, so do the flush if we have queued something
+ * right now or work is pending.
+ */
+ flush_delayed_work(&clt_path->reconnect_dwork);
+ err = (READ_ONCE(clt_path->state) ==
+ RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
+ }
+
+ return err;
+}
+
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
+ const struct attribute *sysfs_self)
+{
+ enum rtrs_clt_state old_state;
+ bool changed;
+
+ /*
+	 * Keep stopping the path until its state has been changed to DEAD
+	 * or has been observed as DEAD:
+ * 1. State was changed to DEAD - we were fast and nobody
+ * invoked rtrs_clt_reconnect(), which can again start
+ * reconnecting.
+ * 2. State was observed as DEAD - we have someone in parallel
+ * removing the path.
+ */
+ do {
+ rtrs_clt_close_conns(clt_path, true);
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_DEAD,
+ &old_state);
+ } while (!changed && old_state != RTRS_CLT_DEAD);
+
+ if (changed) {
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_destroy_path_files(clt_path, sysfs_self);
+ kobject_put(&clt_path->kobj);
+ }
+
+ return 0;
+}
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
+{
+ clt->max_reconnect_attempts = (unsigned int)value;
+}
+
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
+{
+ return (int)clt->max_reconnect_attempts;
+}
+
+/**
+ * rtrs_clt_request() - Request data transfer to/from server via RDMA.
+ *
+ * @dir: READ/WRITE
+ * @ops: callback function to be called as confirmation, and the pointer.
+ * @clt: Session
+ * @permit: Preallocated permit
+ * @vec: Message that is sent to server together with the request.
+ * Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
+ * Since the msg is copied internally it can be allocated on stack.
+ * @nr: Number of elements in @vec.
+ * @data_len: length of data sent to/from server
+ * @sg: Pages to be sent/received to/from server.
+ * @sg_cnt: Number of elements in the @sg
+ *
+ * Return:
+ * 0: Success
+ * <0: Error
+ *
+ * On dir=READ the rtrs client will request a data transfer from the server
+ * to the client. The data the server responds with will be stored in @sg
+ * when the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
+ * On dir=WRITE the rtrs client will RDMA-write the data in @sg to the server.
+ */
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt_path *clt_path;
+
+ enum dma_data_direction dma_dir;
+ int err = -ECONNABORTED, i;
+ size_t usr_len, hdr_len;
+ struct path_it it;
+
+ /* Get kvec length */
+ for (i = 0, usr_len = 0; i < nr; i++)
+ usr_len += vec[i].iov_len;
+
+ if (dir == READ) {
+ hdr_len = sizeof(struct rtrs_msg_rdma_read) +
+ sg_cnt * sizeof(struct rtrs_sg_desc);
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ hdr_len = sizeof(struct rtrs_msg_rdma_write);
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ if (usr_len + hdr_len > clt_path->max_hdr_size) {
+ rtrs_wrn_rl(clt_path->clt,
+ "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
+ dir == READ ? "Read" : "Write",
+ usr_len, hdr_len, clt_path->max_hdr_size);
+ err = -EMSGSIZE;
+ break;
+ }
+ req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
+ vec, usr_len, sg, sg_cnt, data_len,
+ dma_dir);
+ if (dir == READ)
+ err = rtrs_clt_read_req(req);
+ else
+ err = rtrs_clt_write_req(req);
+ if (err) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_clt_request);
+
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
+{
+	/* If there is no path, return -1 so the block layer does not try again */
+ int cnt = -1;
+ struct rtrs_con *con;
+ struct rtrs_clt_path *clt_path;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
+ continue;
+
+ con = clt_path->s.con[index + 1];
+ cnt = ib_process_cq_direct(con->cq, -1);
+ if (cnt)
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return cnt;
+}
+EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
+
+/**
+ * rtrs_clt_query() - queries RTRS session attributes
+ * @clt: session pointer
+ * @attr: query results for session attributes.
+ * Returns:
+ * 0 on success
+ * -ECOMM no connection to the server
+ */
+int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
+{
+ if (!rtrs_clt_is_connected(clt))
+ return -ECOMM;
+
+ attr->queue_depth = clt->queue_depth;
+ attr->max_segments = clt->max_segments;
+	/* Cap max_io_size to the min of the remote buffer size and the FR pages */
+ attr->max_io_size = min_t(int, clt->max_io_size,
+ clt->max_segments * SZ_4K);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtrs_clt_query);
+
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+ clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
+ if (IS_ERR(clt_path))
+ return PTR_ERR(clt_path);
+
+ mutex_lock(&clt->paths_mutex);
+ if (clt->paths_num == 0) {
+ /*
+ * When all the paths are removed for a session,
+ * the addition of the first path is like a new session for
+ * the storage server
+ */
+ clt_path->for_new_clt = 1;
+ }
+
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+	 * It is totally safe to add a path in the CONNECTING state: incoming
+	 * IO will never grab it. Also it is very important to add the path
+	 * before init, since init fires the LINK_CONNECTED event.
+ */
+ rtrs_clt_add_path_to_arr(clt_path);
+
+ err = init_path(clt_path);
+ if (err)
+ goto close_path;
+
+ err = rtrs_clt_create_path_files(clt_path);
+ if (err)
+ goto close_path;
+
+ return 0;
+
+close_path:
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+
+ return err;
+}
+
+static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ if (!(dev->ib_dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ pr_err("Memory registrations not supported.\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_clt_ib_dev_init
+};
+
+static int __init rtrs_client_init(void)
+{
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
+ if (IS_ERR(rtrs_clt_dev_class)) {
+ pr_err("Failed to create rtrs-client dev class\n");
+ return PTR_ERR(rtrs_clt_dev_class);
+ }
+ rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
+ if (!rtrs_wq) {
+ class_destroy(rtrs_clt_dev_class);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit rtrs_client_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_clt_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_client_init);
+module_exit(rtrs_client_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
new file mode 100644
index 000000000..f848c0392
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_CLT_H
+#define RTRS_CLT_H
+
+#include <linux/device.h>
+#include "rtrs-pri.h"
+
+/**
+ * enum rtrs_clt_state - Client states.
+ */
+enum rtrs_clt_state {
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR,
+ RTRS_CLT_RECONNECTING,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_CLOSING,
+ RTRS_CLT_CLOSED,
+ RTRS_CLT_DEAD,
+};
+
+enum rtrs_mp_policy {
+ MP_POLICY_RR,
+ MP_POLICY_MIN_INFLIGHT,
+ MP_POLICY_MIN_LATENCY,
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_reconnects {
+ int successful_cnt;
+ int fail_cnt;
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_cpu_migr {
+ atomic_t from;
+ int to;
+};
+
+/* Stats for read and write operations.
+ * see Documentation/ABI/testing/sysfs-class-rtrs-client for details
+ */
+struct rtrs_clt_stats_rdma {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+
+ u64 failover_cnt;
+};
+
+struct rtrs_clt_stats_pcpu {
+ struct rtrs_clt_stats_cpu_migr cpu_migr;
+ struct rtrs_clt_stats_rdma rdma;
+};
+
+struct rtrs_clt_stats {
+ struct kobject kobj_stats;
+ struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
+ struct rtrs_clt_stats_reconnects reconnects;
+ atomic_t inflight;
+};
+
+struct rtrs_clt_con {
+ struct rtrs_con c;
+ struct rtrs_iu *rsp_ius;
+ u32 queue_num;
+ unsigned int cpu;
+ struct mutex con_mutex;
+ int cm_err;
+};
+
+/**
+ * struct rtrs_permit - permits the memory allocation for a future RDMA
+ * operation. Combine with IRQ pinning to keep IO on the same CPU.
+ */
+struct rtrs_permit {
+ enum rtrs_clt_con_type con_type;
+ unsigned int cpu_id;
+ unsigned int mem_id;
+ unsigned int mem_off;
+};
+
+/**
+ * struct rtrs_clt_io_req - describes one inflight IO request
+ */
+struct rtrs_clt_io_req {
+ struct list_head list;
+ struct rtrs_iu *iu;
+ struct scatterlist *sglist; /* list holding user data */
+ unsigned int sg_cnt;
+ unsigned int sg_size;
+ unsigned int data_len;
+ unsigned int usr_len;
+ void *priv;
+ bool in_use;
+ enum rtrs_mp_policy mp_policy;
+ struct rtrs_clt_con *con;
+ struct rtrs_sg_desc *desc;
+ struct ib_sge *sge;
+ struct rtrs_permit *permit;
+ enum dma_data_direction dir;
+ void (*conf)(void *priv, int errno);
+ unsigned long start_jiffies;
+
+ struct ib_mr *mr;
+ struct ib_cqe inv_cqe;
+ struct completion inv_comp;
+ int inv_errno;
+ bool need_inv_comp;
+ bool need_inv;
+ refcount_t ref;
+};
+
+struct rtrs_rbuf {
+ u64 addr;
+ u32 rkey;
+};
+
+struct rtrs_clt_path {
+ struct rtrs_path s;
+ struct rtrs_clt_sess *clt;
+ wait_queue_head_t state_wq;
+ enum rtrs_clt_state state;
+ atomic_t connected_cnt;
+ struct mutex init_mutex;
+ struct rtrs_clt_io_req *reqs;
+ struct delayed_work reconnect_dwork;
+ struct work_struct close_work;
+ struct work_struct err_recovery_work;
+ unsigned int reconnect_attempts;
+ bool established;
+ struct rtrs_rbuf *rbufs;
+ size_t max_io_size;
+ u32 max_hdr_size;
+ u32 chunk_size;
+ size_t queue_depth;
+ u32 max_pages_per_mr;
+ u32 flags;
+ struct kobject kobj;
+ u8 for_new_clt;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+ char hca_name[IB_DEVICE_NAME_MAX];
+ struct list_head __percpu
+ *mp_skip_entry;
+};
+
+struct rtrs_clt_sess {
+ struct list_head paths_list; /* rcu protected list */
+ size_t paths_num;
+ struct rtrs_clt_path
+ __rcu * __percpu *pcpu_path;
+ uuid_t paths_uuid;
+ int paths_up;
+ struct mutex paths_mutex;
+ struct mutex paths_ev_mutex;
+ char sessname[NAME_MAX];
+ u16 port;
+ unsigned int max_reconnect_attempts;
+ unsigned int reconnect_delay_sec;
+ unsigned int max_segments;
+ void *permits;
+ unsigned long *permits_map;
+ size_t queue_depth;
+ size_t max_io_size;
+ wait_queue_head_t permits_wait;
+ size_t pdu_sz;
+ void *priv;
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev);
+ struct device dev;
+ struct kobject *kobj_paths;
+ enum rtrs_mp_policy mp_policy;
+};
+
+static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_clt_con, c);
+}
+
+static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
+{
+ return container_of(s, struct rtrs_clt_path, s);
+}
+
+static inline int permit_size(struct rtrs_clt_sess *clt)
+{
+ return sizeof(struct rtrs_permit) + clt->pdu_sz;
+}
+
+static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
+ int idx)
+{
+ return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
+}
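
permit_size() and get_permit() imply a simple layout: the permits area is an array of variable-sized slots, each a fixed header struct followed by pdu_sz bytes of per-permit payload. A minimal sketch of that indexing (the struct fields and pdu_sz = 256 are illustrative assumptions):

#include <stdio.h>
#include <stdlib.h>

struct permit { unsigned int cpu_id, mem_id, mem_off; };

int main(void)
{
	size_t pdu_sz = 256, n = 4;
	size_t slot = sizeof(struct permit) + pdu_sz;	/* == permit_size() */
	char *permits = calloc(n, slot);

	for (size_t i = 0; i < n; i++) {
		struct permit *p = (struct permit *)(permits + slot * i);
		void *pdu = p + 1;	/* per-permit payload area */

		printf("permit %zu at +%zu, pdu at +%zu\n",
		       i, slot * i, (size_t)((char *)pdu - permits));
	}
	free(permits);
	return 0;
}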
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path);
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
+ struct rtrs_addr *addr);
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
+ const struct attribute *sysfs_self);
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
+void free_path(struct rtrs_clt_path *clt_path);
+
+/* rtrs-clt-stats.c */
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
+
+int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
+ bool enable);
+ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
+ char *page);
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page);
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
+ char *page);
+
+/* rtrs-clt-sysfs.c */
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt);
+
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path);
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
+ const struct attribute *sysfs_self);
+
+#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-log.h b/drivers/infiniband/ulp/rtrs/rtrs-log.h
new file mode 100644
index 000000000..53c785b99
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_LOG_H
+#define RTRS_LOG_H
+
+#define rtrs_log(fn, obj, fmt, ...) \
+ fn("<%s>: " fmt, obj->sessname, ##__VA_ARGS__)
+
+#define rtrs_err(obj, fmt, ...) \
+ rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)
+#define rtrs_err_rl(obj, fmt, ...) \
+ rtrs_log(pr_err_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn(obj, fmt, ...) \
+ rtrs_log(pr_warn, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn_rl(obj, fmt, ...) \
+ rtrs_log(pr_warn_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info(obj, fmt, ...) \
+ rtrs_log(pr_info, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info_rl(obj, fmt, ...) \
+ rtrs_log(pr_info_ratelimited, obj, fmt, ##__VA_ARGS__)
+
+#endif /* RTRS_LOG_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
new file mode 100644
index 000000000..a2420eeca
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_PRI_H
+#define RTRS_PRI_H
+
+#include <linux/uuid.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib.h>
+
+#include "rtrs.h"
+
+#define RTRS_PROTO_VER_MAJOR 2
+#define RTRS_PROTO_VER_MINOR 0
+
+#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
+ __stringify(RTRS_PROTO_VER_MINOR)
+
+/*
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65535 (2^16 - 1) in theory
+ * since queue_depth in rtrs_msg_conn_rsp is defined as le16.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65535, and it depends on the system.
+ */
+#define MAX_SESS_QUEUE_DEPTH 65535
+
+enum rtrs_imm_const {
+ MAX_IMM_TYPE_BITS = 4,
+ MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
+ MAX_IMM_PAYL_BITS = 28,
+ MAX_IMM_PAYL_MASK = ((1 << MAX_IMM_PAYL_BITS) - 1),
+};
+
+enum rtrs_imm_type {
+ RTRS_IO_REQ_IMM = 0, /* client to server */
+ RTRS_IO_RSP_IMM = 1, /* server to client */
+ RTRS_IO_RSP_W_INV_IMM = 2, /* server to client */
+
+ RTRS_HB_MSG_IMM = 8, /* HB: HeartBeat */
+ RTRS_HB_ACK_IMM = 9,
+
+ RTRS_LAST_IMM,
+};
+
+enum {
+ SERVICE_CON_QUEUE_DEPTH = 512,
+
+ MAX_PATHS_NUM = 128,
+
+ MIN_CHUNK_SIZE = 8192,
+
+ RTRS_HB_INTERVAL_MS = 5000,
+ RTRS_HB_MISSED_MAX = 5,
+
+ RTRS_MAGIC = 0x1BBD,
+ RTRS_PROTO_VER = (RTRS_PROTO_VER_MAJOR << 8) | RTRS_PROTO_VER_MINOR,
+};
+
+struct rtrs_ib_dev;
+
+struct rtrs_rdma_dev_pd_ops {
+ struct rtrs_ib_dev *(*alloc)(void);
+ void (*free)(struct rtrs_ib_dev *dev);
+ int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
+};
+
+struct rtrs_rdma_dev_pd {
+ struct mutex mutex;
+ struct list_head list;
+ enum ib_pd_flags pd_flags;
+ const struct rtrs_rdma_dev_pd_ops *ops;
+};
+
+struct rtrs_ib_dev {
+ struct ib_device *ib_dev;
+ struct ib_pd *ib_pd;
+ struct kref ref;
+ struct list_head entry;
+ struct rtrs_rdma_dev_pd *pool;
+};
+
+struct rtrs_con {
+ struct rtrs_path *path;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct rdma_cm_id *cm_id;
+ unsigned int cid;
+ int nr_cqe;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
+};
+
+struct rtrs_path {
+ struct list_head entry;
+ struct sockaddr_storage dst_addr;
+ struct sockaddr_storage src_addr;
+ char sessname[NAME_MAX];
+ uuid_t uuid;
+ struct rtrs_con **con;
+ unsigned int con_num;
+ unsigned int irq_con_num;
+ unsigned int recon_cnt;
+ unsigned int signal_interval;
+ struct rtrs_ib_dev *dev;
+ int dev_ref;
+ struct ib_cqe *hb_cqe;
+ void (*hb_err_handler)(struct rtrs_con *con);
+ struct workqueue_struct *hb_wq;
+ struct delayed_work hb_dwork;
+ unsigned int hb_interval_ms;
+ unsigned int hb_missed_cnt;
+ unsigned int hb_missed_max;
+ ktime_t hb_last_sent;
+ ktime_t hb_cur_latency;
+};
+
+/* rtrs information unit */
+struct rtrs_iu {
+ struct ib_cqe cqe;
+ dma_addr_t dma_addr;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+/**
+ * enum rtrs_msg_types - RTRS message types, see also rtrs/README
+ * @RTRS_MSG_INFO_REQ: Client additional info request to the server
+ * @RTRS_MSG_INFO_RSP: Server additional info response to the client
+ * @RTRS_MSG_WRITE: Client writes data per RDMA to server
+ * @RTRS_MSG_READ: Client requests data transfer from server
+ * @RTRS_MSG_RKEY_RSP: Server refreshed rkey for rbuf
+ */
+enum rtrs_msg_types {
+ RTRS_MSG_INFO_REQ,
+ RTRS_MSG_INFO_RSP,
+ RTRS_MSG_WRITE,
+ RTRS_MSG_READ,
+ RTRS_MSG_RKEY_RSP,
+};
+
+/**
+ * enum rtrs_msg_flags - RTRS message flags.
+ * @RTRS_MSG_NEED_INVAL_F: Send invalidation in response.
+ * @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
+ */
+enum rtrs_msg_flags {
+ RTRS_MSG_NEED_INVAL_F = 1 << 0,
+ RTRS_MSG_NEW_RKEY_F = 1 << 1,
+};
+
+/**
+ * struct rtrs_sg_desc - RDMA-Buffer entry description
+ * @addr: Address of RDMA destination buffer
+ * @key: Authorization rkey to write to the buffer
+ * @len: Size of the buffer
+ */
+struct rtrs_sg_desc {
+ __le64 addr;
+ __le32 key;
+ __le32 len;
+};
+
+/**
+ * struct rtrs_msg_conn_req - Client connection request to the server
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @cid: Current connection id
+ * @cid_num: Number of connections per session
+ * @recon_cnt: Reconnections counter
+ * @sess_uuid: UUID of a session (path)
+ * @paths_uuid: UUID of a group of sessions (paths)
+ *
+ * NOTE: max size 56 bytes, see man rdma_connect().
+ */
+struct rtrs_msg_conn_req {
+ /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
+ * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ */
+ u8 __cma_version;
+	/* On the sender side this should be set to 0, or cma_save_ip_info()
+	 * extracts garbage and will fail.
+	 */
+ u8 __ip_version;
+ __le16 magic;
+ __le16 version;
+ __le16 cid;
+ __le16 cid_num;
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+ u8 first_conn : 1;
+ u8 reserved_bits : 7;
+ u8 reserved[11];
+};
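
The 56-byte note above comes from the IB private-data limit of rdma_connect(); the field sizes are chosen so the request fills that budget exactly. A standalone sketch that rechecks the arithmetic at compile time (uuid_t is stood in for by a 16-byte array, and the bitfield pair is folded into one byte):

#include <assert.h>

struct conn_req_sketch {
	unsigned char cma_version, ip_version;
	unsigned short magic, version, cid, cid_num, recon_cnt;
	unsigned char sess_uuid[16], paths_uuid[16];
	unsigned char first_conn;	/* 1 flag bit + 7 reserved bits */
	unsigned char reserved[11];
};

static_assert(sizeof(struct conn_req_sketch) == 56,
	      "conn req must fit rdma_connect() private data");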
+
+/**
+ * struct rtrs_msg_conn_rsp - Server connection response to the client
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @errno: 0 in case of rdma_accept(), otherwise the rdma_reject() error
+ * @queue_depth: max inflight messages (queue-depth) in this session
+ * @max_io_size: max io size server supports
+ * @max_hdr_size: max msg header size server supports
+ *
+ * NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
+ */
+struct rtrs_msg_conn_rsp {
+ __le16 magic;
+ __le16 version;
+ __le16 errno;
+ __le16 queue_depth;
+ __le32 max_io_size;
+ __le32 max_hdr_size;
+ __le32 flags;
+ u8 reserved[36];
+};
+
+/**
+ * struct rtrs_msg_info_req
+ * @type: @RTRS_MSG_INFO_REQ
+ * @pathname: Path name chosen by client
+ */
+struct rtrs_msg_info_req {
+ __le16 type;
+ u8 pathname[NAME_MAX];
+ u8 reserved[15];
+};
+
+/**
+ * struct rtrs_msg_info_rsp
+ * @type: @RTRS_MSG_INFO_RSP
+ * @sg_cnt: Number of @desc entries
+ * @desc: RDMA buffers where the client can write to server
+ */
+struct rtrs_msg_info_rsp {
+ __le16 type;
+ __le16 sg_cnt;
+ u8 reserved[4];
+ struct rtrs_sg_desc desc[];
+};
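
Because desc[] is a flexible array member, a receive buffer for this message must be sized as the header plus sg_cnt descriptors, exactly the rx_sz computation done in rtrs_send_path_info(). A small sketch (sg_cnt = 3 is an assumed example; the field types mirror the wire layout minus the __le annotations):

#include <stdio.h>

struct desc { unsigned long long addr; unsigned int key, len; };

struct info_rsp {
	unsigned short type, sg_cnt;
	unsigned char reserved[4];
	struct desc desc[];
};

int main(void)
{
	unsigned short sg_cnt = 3;	/* assumed example value */
	size_t rx_sz = sizeof(struct info_rsp) + sizeof(struct desc) * sg_cnt;

	printf("rx buffer: %zu bytes\n", rx_sz);	/* 8 + 16 * 3 = 56 */
	return 0;
}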
+
+/**
+ * struct rtrs_msg_rkey_rsp
+ * @type: @RTRS_MSG_RKEY_RSP
+ * @buf_id: RDMA buf_id of the new rkey
+ * @rkey: new remote key for RDMA buffers id from server
+ */
+struct rtrs_msg_rkey_rsp {
+ __le16 type;
+ __le16 buf_id;
+ __le32 rkey;
+};
+
+/**
+ * struct rtrs_msg_rdma_read - RDMA data transfer request from client
+ * @type: always @RTRS_MSG_READ
+ * @usr_len: length of user payload
+ * @sg_cnt: number of @desc entries
+ * @desc: RDMA buffers where the server can write the result to
+ */
+struct rtrs_msg_rdma_read {
+ __le16 type;
+ __le16 usr_len;
+ __le16 flags;
+ __le16 sg_cnt;
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
+ * @type: always @RTRS_MSG_WRITE
+ * @usr_len: length of user payload
+ */
+struct rtrs_msg_rdma_write {
+ __le16 type;
+ __le16 usr_len;
+};
+
+/**
+ * struct rtrs_msg_rdma_hdr - header for a read or write request
+ * @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
+ */
+struct rtrs_msg_rdma_hdr {
+ __le16 type;
+};
+
+/* rtrs.c */
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_num, size_t size, gfp_t gfp_mask,
+			      struct ib_device *dev, enum dma_data_direction dir,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc));
+void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_num);
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head);
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head,
+ struct ib_send_wr *tail);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
+
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, int nr_cqe,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx);
+void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq);
+void rtrs_start_hb(struct rtrs_path *path);
+void rtrs_stop_hb(struct rtrs_path *path);
+void rtrs_send_hb_ack(struct rtrs_path *path);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool);
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool);
+
+struct rtrs_ib_dev *rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool);
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev);
+
+static inline u32 rtrs_to_imm(u32 type, u32 payload)
+{
+ BUILD_BUG_ON(MAX_IMM_PAYL_BITS + MAX_IMM_TYPE_BITS != 32);
+ BUILD_BUG_ON(RTRS_LAST_IMM > (1<<MAX_IMM_TYPE_BITS));
+ return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
+ (payload & MAX_IMM_PAYL_MASK);
+}
+
+static inline void rtrs_from_imm(u32 imm, u32 *type, u32 *payload)
+{
+ *payload = imm & MAX_IMM_PAYL_MASK;
+ *type = imm >> MAX_IMM_PAYL_BITS;
+}
+
+static inline u32 rtrs_to_io_req_imm(u32 addr)
+{
+ return rtrs_to_imm(RTRS_IO_REQ_IMM, addr);
+}
+
+static inline u32 rtrs_to_io_rsp_imm(u32 msg_id, int errno, bool w_inval)
+{
+ enum rtrs_imm_type type;
+ u32 payload;
+
+ /* 9 bits for errno, 19 bits for msg_id */
+ payload = (abs(errno) & 0x1ff) << 19 | (msg_id & 0x7ffff);
+ type = w_inval ? RTRS_IO_RSP_W_INV_IMM : RTRS_IO_RSP_IMM;
+
+ return rtrs_to_imm(type, payload);
+}
+
+static inline void rtrs_from_io_rsp_imm(u32 payload, u32 *msg_id, int *errno)
+{
+ /* 9 bits for errno, 19 bits for msg_id */
+ *msg_id = payload & 0x7ffff;
+ *errno = -(int)((payload >> 19) & 0x1ff);
+}
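
Putting the two helpers together: 4 type bits sit above a 28-bit payload, and for IO responses the payload itself splits into a 9-bit absolute errno over a 19-bit msg_id. A runnable round-trip sketch of the encode/decode pair:

#include <assert.h>
#include <stdio.h>

#define MAX_IMM_TYPE_BITS 4
#define MAX_IMM_PAYL_BITS 28
#define MAX_IMM_TYPE_MASK ((1 << MAX_IMM_TYPE_BITS) - 1)
#define MAX_IMM_PAYL_MASK ((1u << MAX_IMM_PAYL_BITS) - 1)

int main(void)
{
	unsigned int msg_id = 0x1234, err = 5;	/* e.g. abs(-EIO) */
	unsigned int pl = ((err & 0x1ff) << 19) | (msg_id & 0x7ffff);
	unsigned int imm = ((2u & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
			   (pl & MAX_IMM_PAYL_MASK);	/* type RTRS_IO_RSP_W_INV_IMM */
	unsigned int payload = imm & MAX_IMM_PAYL_MASK;
	unsigned int type = imm >> MAX_IMM_PAYL_BITS;

	assert(type == 2);
	assert((payload & 0x7ffff) == msg_id);
	assert(((payload >> 19) & 0x1ff) == err);
	printf("imm = 0x%08x round-trips\n", imm);
	return 0;
}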
+
+#define STAT_STORE_FUNC(type, set_value, reset) \
+static ssize_t set_value##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret = -EINVAL; \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ if (sysfs_streq(buf, "1")) \
+ ret = reset(stats, true); \
+ else if (sysfs_streq(buf, "0")) \
+ ret = reset(stats, false); \
+ if (ret) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define STAT_SHOW_FUNC(type, get_value, print) \
+static ssize_t get_value##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ return print(stats, page); \
+}
+
+#define STAT_ATTR(type, stat, print, reset) \
+STAT_STORE_FUNC(type, stat, reset) \
+STAT_SHOW_FUNC(type, stat, print) \
+static struct kobj_attribute stat##_attr = __ATTR_RW(stat)
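
STAT_ATTR() stamps out a matched show/store pair plus the kobj_attribute binding them: writing "1" to the sysfs file calls the reset helper with enable=true, writing "0" calls it with enable=false, and reading prints the statistic. Roughly what an invocation such as STAT_ATTR(struct rtrs_clt_stats, reconnects, print_fn, reset_fn) expands to (print_fn/reset_fn are placeholder names; the real call sites live in the *-sysfs.c files):

static ssize_t reconnects_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count);	/* "1"/"0" -> reset_fn */
static ssize_t reconnects_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *page);			/* body calls print_fn */
static struct kobj_attribute reconnects_attr = __ATTR_RW(reconnects);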
+
+#endif /* RTRS_PRI_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
new file mode 100644
index 000000000..2aff1213a
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-srv.h"
+
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
+{
+ if (enable) {
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+ memset(r, 0, sizeof(*r));
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
+{
+ int cpu;
+ struct rtrs_srv_stats_rdma_stats sum;
+ struct rtrs_srv_stats_rdma_stats *r;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = per_cpu_ptr(stats->rdma_stats, cpu);
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ }
+
+ return sysfs_emit(page, "%llu %llu %llu %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total);
+}
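+
+/*
+ * Usage sketch (the exact sysfs path is an assumption; see the kobject
+ * hierarchy built in rtrs-srv-sysfs.c): the "rdma" attribute prints
+ * read-count read-bytes write-count write-bytes on one line.
+ *
+ *	$ cat .../paths/<path>/stats/rdma
+ *	12 49152 7 28672
+ *	$ echo 1 > .../paths/<path>/stats/rdma	# reset the counters
+ */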
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
new file mode 100644
index 000000000..2a3c9ac64
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+
+static void rtrs_srv_release(struct kobject *kobj)
+{
+ struct rtrs_srv_path *srv_path;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ kfree(srv_path);
+}
+
+static struct kobj_type ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_release,
+};
+
+static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
+}
+
+static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_path *s;
+ char str[MAXHOSTNAMELEN];
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ s = &srv_path->s;
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(s, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
+ sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
+ /* first remove sysfs itself to avoid deadlock */
+ sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
+ close_path(srv_path);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_srv_disconnect_attr =
+ __ATTR(disconnect, 0644,
+ rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
+
+static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_con *usr_con;
+
+ srv_path = container_of(kobj, typeof(*srv_path), kobj);
+ usr_con = srv_path->s.con[0];
+
+ return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
+}
+
+static struct kobj_attribute rtrs_srv_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
+
+static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+
+ return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
+}
+
+static struct kobj_attribute rtrs_srv_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
+
+static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+ int cnt;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + sysfs_emit_at(page, cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
+
+static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_path *srv_path;
+ int len;
+
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
+ PAGE_SIZE);
+ len += sysfs_emit_at(page, len, "\n");
+ return len;
+}
+
+static struct kobj_attribute rtrs_srv_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
+
+static struct attribute *rtrs_srv_path_attrs[] = {
+ &rtrs_srv_hca_name_attr.attr,
+ &rtrs_srv_hca_port_attr.attr,
+ &rtrs_srv_src_addr_attr.attr,
+ &rtrs_srv_dst_addr_attr.attr,
+ &rtrs_srv_disconnect_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_srv_path_attr_group = {
+ .attrs = rtrs_srv_path_attrs,
+};
+
+STAT_ATTR(struct rtrs_srv_stats, rdma,
+ rtrs_srv_stats_rdma_to_str,
+ rtrs_srv_reset_rdma_stats);
+
+static struct attribute *rtrs_srv_stats_attrs[] = {
+ &rdma_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group rtrs_srv_stats_attr_group = {
+ .attrs = rtrs_srv_stats_attrs,
+};
+
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ int err = 0;
+
+ mutex_lock(&srv->paths_mutex);
+ if (srv->dev_ref++) {
+ /*
+ * Device needs to be registered only on the first session
+ */
+ goto unlock;
+ }
+ srv->dev.class = rtrs_dev_class;
+ err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
+ if (err)
+ goto unlock;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&srv->dev, true);
+ err = device_add(&srv->dev);
+ if (err) {
+ pr_err("device_add(): %d\n", err);
+ put_device(&srv->dev);
+ goto unlock;
+ }
+ srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
+ if (!srv->kobj_paths) {
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_del(&srv->dev);
+ put_device(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+ kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
+unlock:
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static void
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+
+ mutex_lock(&srv->paths_mutex);
+ if (!--srv->dev_ref) {
+ kobject_del(srv->kobj_paths);
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_del(&srv->dev);
+ put_device(&srv->dev);
+ } else {
+ put_device(&srv->dev);
+ mutex_unlock(&srv->paths_mutex);
+ }
+}
+
+static void rtrs_srv_path_stats_release(struct kobject *kobj)
+{
+ struct rtrs_srv_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+
+ free_percpu(stats->rdma_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_path_stats_release,
+};
+
+static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
+{
+ int err;
+ struct rtrs_path *s = &srv_path->s;
+
+ err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
+ &srv_path->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ kobject_put(&srv_path->stats->kobj_stats);
+ return err;
+ }
+ err = sysfs_create_group(&srv_path->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+
+ return err;
+}
+
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
+ char str[NAME_MAX];
+ int err;
+ struct rtrs_addr path = {
+ .src = &srv_path->s.dst_addr,
+ .dst = &srv_path->s.src_addr,
+ };
+
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
+ if (err)
+ return err;
+
+ err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
+ "%s", str);
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ goto destroy_root;
+ }
+ err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = rtrs_srv_create_stats_files(srv_path);
+ if (err)
+ goto remove_group;
+
+ return 0;
+
+remove_group:
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+put_kobj:
+ kobject_del(&srv_path->kobj);
+destroy_root:
+ kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
+
+ return err;
+}
+
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
+{
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ kobject_put(&srv_path->kobj);
+
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
+ }
+}
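+
+/*
+ * Resulting sysfs layout, for reference (<sessname> and <src@dst> come
+ * from the client; the class directory name is whatever rtrs_dev_class
+ * was registered as):
+ *
+ *	/sys/class/<rtrs-server-class>/<sessname>/
+ *		paths/<src@dst>/
+ *			hca_name hca_port src_addr dst_addr disconnect
+ *			stats/rdma
+ */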
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
new file mode 100644
index 000000000..29ca59ceb
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-srv-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
new file mode 100644
index 000000000..587d3e033
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_srv
+
+#if !defined(_TRACE_RTRS_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_SRV_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_srv_op;
+struct rtrs_srv_con;
+struct rtrs_srv_path;
+
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSED);
+
+#define show_rtrs_srv_state(x) \
+ __print_symbolic(x, \
+ { RTRS_SRV_CONNECTING, "CONNECTING" }, \
+ { RTRS_SRV_CONNECTED, "CONNECTED" }, \
+ { RTRS_SRV_CLOSING, "CLOSING" }, \
+ { RTRS_SRV_CLOSED, "CLOSED" })
+
+TRACE_EVENT(send_io_resp_imm,
+ TP_PROTO(struct rtrs_srv_op *id,
+ bool need_inval,
+ bool always_invalidate,
+ int errno),
+
+ TP_ARGS(id, need_inval, always_invalidate, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, dir)
+ __field(bool, need_inval)
+ __field(bool, always_invalidate)
+ __field(u32, msg_id)
+ __field(int, wr_cnt)
+ __field(u32, signal_interval)
+ __field(int, state)
+ __field(int, errno)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_srv_con *con = id->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+
+ __entry->dir = id->dir;
+ __entry->state = srv_path->state;
+ __entry->errno = errno;
+ __entry->need_inval = need_inval;
+ __entry->always_invalidate = always_invalidate;
+ __entry->msg_id = id->msg_id;
+ __entry->wr_cnt = atomic_read(&con->c.wr_cnt);
+ __entry->signal_interval = s->signal_interval;
+ memcpy(__entry->sessname, kobject_name(&srv_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("sess='%s' state='%s' dir=%s err='%d' inval='%d' glob-inval='%d' msgid='%u' wrcnt='%d' sig-interval='%u'",
+ __entry->sessname,
+ show_rtrs_srv_state(__entry->state),
+ __print_symbolic(__entry->dir,
+ { READ, "READ" },
+ { WRITE, "WRITE" }),
+ __entry->errno,
+ __entry->need_inval,
+ __entry->always_invalidate,
+ __entry->msg_id,
+ __entry->wr_cnt,
+ __entry->signal_interval
+ )
+);
+
+#endif /* _TRACE_RTRS_SRV_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-srv-trace
+#include <trace/define_trace.h>
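+
+/*
+ * Usage sketch, assuming tracefs is mounted at /sys/kernel/tracing:
+ *
+ *	# echo 1 > /sys/kernel/tracing/events/rtrs_srv/send_io_resp_imm/enable
+ *	# cat /sys/kernel/tracing/trace_pipe
+ */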
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
new file mode 100644
index 000000000..e978ee4bb
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -0,0 +1,2314 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
+#include "rtrs-srv-trace.h"
+
+MODULE_DESCRIPTION("RDMA Transport Server");
+MODULE_LICENSE("GPL");
+
+/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
+#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
+#define DEFAULT_SESS_QUEUE_DEPTH 512
+#define MAX_HDR_SIZE PAGE_SIZE
+
+static struct rtrs_rdma_dev_pd dev_pd;
+struct class *rtrs_dev_class;
+static struct rtrs_srv_ib_ctx ib_ctx;
+
+static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+
+static bool always_invalidate = true;
+module_param(always_invalidate, bool, 0444);
+MODULE_PARM_DESC(always_invalidate,
+ "Invalidate memory registration for contiguous memory regions before accessing.");
+
+module_param_named(max_chunk_size, max_chunk_size, int, 0444);
+MODULE_PARM_DESC(max_chunk_size,
+		 "Max size for each IO request, in bytes (default: "
+		 __stringify(DEFAULT_MAX_CHUNK_SIZE) ")");
+
+module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
+MODULE_PARM_DESC(sess_queue_depth,
+ "Number of buffers for pending I/O requests to allocate per session. Maximum: "
+ __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
+ __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
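+
+/*
+ * Load-time usage sketch (all three parameters are 0444, i.e. read-only
+ * after load; "rtrs-server" is the module name assumed here):
+ *
+ *	# modprobe rtrs-server sess_queue_depth=1024 always_invalidate=0
+ */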
+
+static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
+
+static struct workqueue_struct *rtrs_wq;
+
+static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_srv_con, c);
+}
+
+static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+ bool changed = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&srv_path->state_lock, flags);
+ old_state = srv_path->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+ if (old_state == RTRS_SRV_CONNECTING)
+ changed = true;
+ break;
+ case RTRS_SRV_CLOSING:
+ if (old_state == RTRS_SRV_CONNECTING ||
+ old_state == RTRS_SRV_CONNECTED)
+ changed = true;
+ break;
+ case RTRS_SRV_CLOSED:
+ if (old_state == RTRS_SRV_CLOSING)
+ changed = true;
+ break;
+ default:
+ break;
+ }
+ if (changed)
+ srv_path->state = new_state;
+ spin_unlock_irqrestore(&srv_path->state_lock, flags);
+
+ return changed;
+}
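+
+/*
+ * The transitions permitted above, for reference:
+ *
+ *	CONNECTING -> CONNECTED -> CLOSING -> CLOSED
+ *	CONNECTING -------------------^
+ */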
+
+static void free_id(struct rtrs_srv_op *id)
+{
+ if (!id)
+ return;
+ kfree(id);
+}
+
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ int i;
+
+ if (srv_path->ops_ids) {
+ for (i = 0; i < srv->queue_depth; i++)
+ free_id(srv_path->ops_ids[i]);
+ kfree(srv_path->ops_ids);
+ srv_path->ops_ids = NULL;
+ }
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_srv_rdma_done
+};
+
+static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
+{
+ struct rtrs_srv_path *srv_path = container_of(ref,
+ struct rtrs_srv_path,
+ ids_inflight_ref);
+
+ percpu_ref_exit(&srv_path->ids_inflight_ref);
+ complete(&srv_path->complete_done);
+}
+
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_op *id;
+ int i, ret;
+
+ srv_path->ops_ids = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->ops_ids),
+ GFP_KERNEL);
+ if (!srv_path->ops_ids)
+ goto err;
+
+ for (i = 0; i < srv->queue_depth; ++i) {
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ goto err;
+
+ srv_path->ops_ids[i] = id;
+ }
+
+ ret = percpu_ref_init(&srv_path->ids_inflight_ref,
+ rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("Percpu reference init failed\n");
+ goto err;
+ }
+ init_completion(&srv_path->complete_done);
+
+ return 0;
+
+err:
+ rtrs_srv_free_ops_ids(srv_path);
+ return -ENOMEM;
+}
+
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ percpu_ref_get(&srv_path->ids_inflight_ref);
+}
+
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
+{
+ percpu_ref_put(&srv_path->ids_inflight_ref);
+}
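+
+/*
+ * Pairing note (descriptive only): process_read()/process_write() below
+ * take an inflight reference which rtrs_srv_resp_rdma() drops once the
+ * response has been posted; rtrs_srv_close_work() kills the percpu ref
+ * and waits on complete_done until the last reference is gone.
+ */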
+
+static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(s, "REG MR failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_path(srv_path);
+ return;
+ }
+}
+
+static struct ib_cqe local_reg_cqe = {
+ .done = rtrs_srv_reg_mr_done
+};
+
+static int rdma_write_sg(struct rtrs_srv_op *id)
+{
+ struct rtrs_path *s = id->con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct ib_send_wr inv_wr;
+ struct ib_rdma_wr imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+ int err, offset;
+ bool need_inval;
+ u32 rkey = 0;
+ struct ib_reg_wr rwr;
+ struct ib_sge *plist;
+ struct ib_sge list;
+
+ sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
+ need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
+ if (sg_cnt != 1)
+ return -EINVAL;
+
+ offset = 0;
+
+ wr = &id->tx_wr;
+ plist = &id->tx_sg;
+ plist->addr = dma_addr + offset;
+ plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
+
+	/*
+	 * The WR will fail with a length error if this is 0.
+	 */
+ if (plist->length == 0) {
+ rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
+ return -EINVAL;
+ }
+
+ plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
+ offset += plist->length;
+
+ wr->wr.sg_list = plist;
+ wr->wr.num_sge = 1;
+ wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
+ wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
+ if (rkey == 0)
+ rkey = wr->rkey;
+ else
+ /* Only one key is actually used */
+ WARN_ON_ONCE(rkey != wr->rkey);
+
+ wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.wr_cqe = &io_comp_cqe;
+ wr->wr.ex.imm_data = 0;
+ wr->wr.send_flags = 0;
+
+ if (need_inval && always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &inv_wr;
+ inv_wr.next = &imm_wr.wr;
+ } else if (always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr.wr;
+ } else if (need_inval) {
+ wr->wr.next = &inv_wr;
+ inv_wr.next = &imm_wr.wr;
+ } else {
+ wr->wr.next = &imm_wr.wr;
+ }
+ /*
+ * From time to time we have to post signaled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
+ 0 : IB_SEND_SIGNALED;
+
+ if (need_inval) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.wr_cqe = &io_comp_cqe;
+ inv_wr.send_flags = 0;
+ inv_wr.ex.invalidate_rkey = rkey;
+ }
+
+ imm_wr.wr.next = NULL;
+ if (always_invalidate) {
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &srv_path->mrs[id->msg_id];
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.wr_cqe = &local_reg_cqe;
+ rwr.wr.num_sge = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.wr.send_flags = 0;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.wr.sg_list = &list;
+ imm_wr.wr.num_sge = 1;
+ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.wr.sg_list = NULL;
+ imm_wr.wr.num_sge = 0;
+ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.wr.send_flags = flags;
+ imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 0, need_inval));
+
+ imm_wr.wr.wr_cqe = &io_comp_cqe;
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
+ offset, DMA_BIDIRECTIONAL);
+
+ err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
+ if (err)
+ rtrs_err(s,
+ "Posting RDMA-Write-Request to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
+ * requests or on successful WRITE request.
+ * @con: the connection to send back result
+ * @id: the id associated with the IO
+ * @errno: the error number of the IO.
+ *
+ * Return 0 on success, errno otherwise.
+ */
+static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ int errno)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct ib_send_wr inv_wr, *wr = NULL;
+ struct ib_rdma_wr imm_wr;
+ struct ib_reg_wr rwr;
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
+ u32 imm;
+ int err;
+
+ if (id->dir == READ) {
+ struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
+ size_t sg_cnt;
+
+ need_inval = le16_to_cpu(rd_msg->flags) &
+ RTRS_MSG_NEED_INVAL_F;
+ sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
+
+ if (need_inval) {
+ if (sg_cnt) {
+ inv_wr.wr_cqe = &io_comp_cqe;
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ /* Only one key is actually used */
+ inv_wr.ex.invalidate_rkey =
+ le32_to_cpu(rd_msg->desc[0].key);
+ } else {
+ WARN_ON_ONCE(1);
+ need_inval = false;
+ }
+ }
+ }
+
+ trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
+
+ if (need_inval && always_invalidate) {
+ wr = &inv_wr;
+ inv_wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr.wr;
+ } else if (always_invalidate) {
+ wr = &rwr.wr;
+ rwr.wr.next = &imm_wr.wr;
+ } else if (need_inval) {
+ wr = &inv_wr;
+ inv_wr.next = &imm_wr.wr;
+ } else {
+ wr = &imm_wr.wr;
+ }
+ /*
+	 * From time to time we have to post signaled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
+ 0 : IB_SEND_SIGNALED;
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.wr.next = NULL;
+ if (always_invalidate) {
+ struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &srv_path->mrs[id->msg_id];
+ rwr.wr.next = &imm_wr.wr;
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.wr_cqe = &local_reg_cqe;
+ rwr.wr.num_sge = 0;
+ rwr.wr.send_flags = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.wr.sg_list = &list;
+ imm_wr.wr.num_sge = 1;
+ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.wr.sg_list = NULL;
+ imm_wr.wr.num_sge = 0;
+ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.wr.send_flags = flags;
+ imm_wr.wr.wr_cqe = &io_comp_cqe;
+
+ imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
+
+ err = ib_post_send(id->con->c.qp, wr, NULL);
+ if (err)
+ rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
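+
+/*
+ * For reference, the WR chains posted by the two functions above:
+ *
+ *	rdma_write_sg():    write -> [reg] -> [inv] -> imm
+ *	send_io_resp_imm(): [inv] -> [reg] -> imm
+ *
+ * where [reg] is present when always_invalidate is set and [inv] when the
+ * client asked for remote invalidation (RTRS_MSG_NEED_INVAL_F).
+ */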
+
+void close_path(struct rtrs_srv_path *srv_path)
+{
+ if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
+ queue_work(rtrs_wq, &srv_path->close_work);
+ WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
+}
+
+static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
+{
+ switch (state) {
+ case RTRS_SRV_CONNECTING:
+ return "RTRS_SRV_CONNECTING";
+ case RTRS_SRV_CONNECTED:
+ return "RTRS_SRV_CONNECTED";
+ case RTRS_SRV_CLOSING:
+ return "RTRS_SRV_CLOSING";
+ case RTRS_SRV_CLOSED:
+ return "RTRS_SRV_CLOSED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * rtrs_srv_resp_rdma() - Finish an RDMA request
+ *
+ * @id: Internal RTRS operation identifier
+ * @status: Response Code sent to the other side for this operation.
+ *		0 = success, < 0 error
+ * Context: any
+ *
+ * Finish an RDMA operation. A message is sent to the client and the
+ * corresponding memory areas will be released.
+ */
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
+{
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_srv_con *con;
+ struct rtrs_path *s;
+ int err;
+
+ if (WARN_ON(!id))
+ return true;
+
+ con = id->con;
+ s = con->c.path;
+ srv_path = to_srv_path(s);
+
+ id->status = status;
+
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
+ rtrs_err_rl(s,
+ "Sending I/O response failed, server path %s is disconnected, path state %s\n",
+ kobject_name(&srv_path->kobj),
+ rtrs_srv_state_str(srv_path->state));
+ goto out;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
+
+ ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
+ }
+ if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
+ rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
+ kobject_name(&srv_path->kobj),
+ con->c.cid);
+ atomic_add(1, &con->c.sq_wr_avail);
+ spin_lock(&con->rsp_wr_wait_lock);
+ list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
+ spin_unlock(&con->rsp_wr_wait_lock);
+ return false;
+ }
+
+ if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
+ err = send_io_resp_imm(con, id, status);
+ else
+ err = rdma_write_sg(id);
+
+ if (err) {
+ rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
+ kobject_name(&srv_path->kobj));
+ close_path(srv_path);
+ }
+out:
+ rtrs_srv_put_ops_ids(srv_path);
+ return true;
+}
+EXPORT_SYMBOL(rtrs_srv_resp_rdma);
+
+/**
+ * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
+ * @srv: Session pointer
+ * @priv: The private pointer that is associated with the session.
+ */
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
+{
+ srv->priv = priv;
+}
+EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
+
+static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
+{
+ int i;
+
+ for (i = 0; i < srv_path->mrs_num; i++) {
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &srv_path->mrs[i];
+
+ if (always_invalidate)
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
+
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&srv_mr->sgt);
+ }
+ kfree(srv_path->mrs);
+}
+
+static int map_cont_bufs(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *ss = &srv_path->s;
+ int i, mri, err, mrs_num;
+ unsigned int chunk_bits;
+ int chunks_per_mr = 1;
+
+ /*
+	 * Here we map queue_depth chunks to MRs. First we have to
+	 * figure out how many chunks we can map per MR.
+ */
+ if (always_invalidate) {
+ /*
+		 * In order to invalidate each chunk of memory, we need
+		 * more memory regions.
+ */
+ mrs_num = srv->queue_depth;
+ } else {
+ chunks_per_mr =
+ srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
+ chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
+ }
+
+ srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
+ if (!srv_path->mrs)
+ return -ENOMEM;
+
+ srv_path->mrs_num = mrs_num;
+
+ for (mri = 0; mri < mrs_num; mri++) {
+ struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri];
+ struct sg_table *sgt = &srv_mr->sgt;
+ struct scatterlist *s;
+ struct ib_mr *mr;
+ int nr, nr_sgt, chunks;
+
+ chunks = chunks_per_mr * mri;
+ if (!always_invalidate)
+ chunks_per_mr = min_t(int, chunks_per_mr,
+ srv->queue_depth - chunks);
+
+ err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
+ if (err)
+ goto err;
+
+ for_each_sg(sgt->sgl, s, chunks_per_mr, i)
+ sg_set_page(s, srv->chunks[chunks + i],
+ max_chunk_size, 0);
+
+ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+ if (!nr_sgt) {
+ err = -EINVAL;
+ goto free_sg;
+ }
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ nr_sgt);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto unmap_sg;
+ }
+ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
+ NULL, max_chunk_size);
+ if (nr < 0 || nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto dereg_mr;
+ }
+
+ if (always_invalidate) {
+ srv_mr->iu = rtrs_iu_alloc(1,
+ sizeof(struct rtrs_msg_rkey_rsp),
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_rdma_done);
+ if (!srv_mr->iu) {
+ err = -ENOMEM;
+ rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+ goto dereg_mr;
+ }
+ }
+ /* Eventually dma addr for each chunk can be cached */
+ for_each_sg(sgt->sgl, s, nr_sgt, i)
+ srv_path->dma_addr[chunks + i] = sg_dma_address(s);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+ srv_mr->mr = mr;
+
+ continue;
+err:
+ while (mri--) {
+ srv_mr = &srv_path->mrs[mri];
+ sgt = &srv_mr->sgt;
+ mr = srv_mr->mr;
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
+dereg_mr:
+ ib_dereg_mr(mr);
+unmap_sg:
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+free_sg:
+ sg_free_table(sgt);
+ }
+ kfree(srv_path->mrs);
+
+ return err;
+ }
+
+ chunk_bits = ilog2(srv->queue_depth - 1) + 1;
+ srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
+
+ return 0;
+}
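+
+/*
+ * Worked example for the chunk_bits/mem_bits split above, assuming the
+ * 4/28 type/payload bit split declared in rtrs-pri.h: with the default
+ * queue depth of 512, chunk_bits = ilog2(511) + 1 = 9, so mem_bits =
+ * 28 - 9 = 19; rtrs_srv_rdma_done() then recovers
+ * msg_id = payload >> 19 and off = payload & ((1 << 19) - 1).
+ */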
+
+static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
+{
+ close_path(to_srv_path(c->path));
+}
+
+static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
+{
+ rtrs_init_hb(&srv_path->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_srv_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
+{
+ rtrs_start_hb(&srv_path->s);
+}
+
+static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
+{
+ rtrs_stop_hb(&srv_path->s);
+}
+
+static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(s, "Sess info response send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_path(srv_path);
+ return;
+ }
+ WARN_ON(wc->opcode != IB_WC_SEND);
+}
+
+static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ int up, ret = 0;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+ ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+ if (!ret)
+ srv_path->established = true;
+
+ return ret;
+}
+
+static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ if (!srv_path->established)
+ return;
+
+ srv_path->established = false;
+ mutex_lock(&srv->paths_ev_mutex);
+ WARN_ON(!srv->paths_up);
+ if (--srv->paths_up == 0)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
+ mutex_unlock(&srv->paths_ev_mutex);
+}
+
+static bool exist_pathname(struct rtrs_srv_ctx *ctx,
+ const char *pathname, const uuid_t *path_uuid)
+{
+ struct rtrs_srv_sess *srv;
+ struct rtrs_srv_path *srv_path;
+ bool found = false;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ mutex_lock(&srv->paths_mutex);
+
+		/* Skip the same client (same uuid): it may legitimately add another path to its own session */
+ if (uuid_equal(&srv->paths_uuid, path_uuid)) {
+ mutex_unlock(&srv->paths_mutex);
+ continue;
+ }
+
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (strlen(srv_path->s.sessname) == strlen(pathname) &&
+ !strcmp(srv_path->s.sessname, pathname)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&srv->paths_mutex);
+ if (found)
+ break;
+ }
+ mutex_unlock(&ctx->srv_mutex);
+ return found;
+}
+
+static int post_recv_path(struct rtrs_srv_path *srv_path);
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
+
+static int process_info_req(struct rtrs_srv_con *con,
+ struct rtrs_msg_info_req *msg)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct ib_send_wr *reg_wr = NULL;
+ struct rtrs_msg_info_rsp *rsp;
+ struct rtrs_iu *tx_iu;
+ struct ib_reg_wr *rwr;
+ int mri, err;
+ size_t tx_sz;
+
+ err = post_recv_path(srv_path);
+ if (err) {
+ rtrs_err(s, "post_recv_path(), err: %d\n", err);
+ return err;
+ }
+
+ if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
+		rtrs_err(s, "pathname cannot contain '/' or '.'\n");
+ return -EINVAL;
+ }
+
+ if (exist_pathname(srv_path->srv->ctx,
+ msg->pathname, &srv_path->srv->paths_uuid)) {
+ rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
+ return -EPERM;
+ }
+ strscpy(srv_path->s.sessname, msg->pathname,
+ sizeof(srv_path->s.sessname));
+
+ rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ if (!rwr)
+ return -ENOMEM;
+
+ tx_sz = sizeof(*rsp);
+ tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
+ if (!tx_iu) {
+ err = -ENOMEM;
+ goto rwr_free;
+ }
+
+ rsp = tx_iu->buf;
+ rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
+ rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
+
+ for (mri = 0; mri < srv_path->mrs_num; mri++) {
+ struct ib_mr *mr = srv_path->mrs[mri].mr;
+
+ rsp->desc[mri].addr = cpu_to_le64(mr->iova);
+ rsp->desc[mri].key = cpu_to_le32(mr->rkey);
+ rsp->desc[mri].len = cpu_to_le32(mr->length);
+
+ /*
+ * Fill in reg MR request and chain them *backwards*
+ */
+ rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
+ rwr[mri].wr.opcode = IB_WR_REG_MR;
+ rwr[mri].wr.wr_cqe = &local_reg_cqe;
+ rwr[mri].wr.num_sge = 0;
+ rwr[mri].wr.send_flags = 0;
+ rwr[mri].mr = mr;
+ rwr[mri].key = mr->rkey;
+ rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ reg_wr = &rwr[mri].wr;
+ }
+
+ err = rtrs_srv_create_path_files(srv_path);
+ if (err)
+ goto iu_free;
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+ err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+ if (!err) {
+ rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
+ goto iu_free;
+ }
+
+ rtrs_srv_start_hb(srv_path);
+
+ /*
+ * We do not account number of established connections at the current
+ * moment, we rely on the client, which should send info request when
+ * all connections are successfully established. Thus, simply notify
+ * listener with a proper event if we are the first path.
+ */
+ err = rtrs_srv_path_up(srv_path);
+ if (err) {
+ rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
+ goto iu_free;
+ }
+
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info response */
+ err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
+ if (err) {
+ rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
+iu_free:
+ rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
+ }
+rwr_free:
+ kfree(rwr);
+
+ return err;
+}
+
+static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *iu;
+ int err;
+
+ WARN_ON(con->c.cid);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(s, "Sess info request receive failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto close;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (wc->byte_len < sizeof(*msg)) {
+ rtrs_err(s, "Sess info request is malformed: size %d\n",
+ wc->byte_len);
+ goto close;
+ }
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
+ rtrs_err(s, "Sess info request is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto close;
+ }
+ err = process_info_req(con, msg);
+ if (err)
+ goto close;
+
+out:
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
+ return;
+close:
+ close_path(srv_path);
+ goto out;
+}
+
+static int post_recv_info_req(struct rtrs_srv_con *con)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_iu *rx_iu;
+ int err;
+
+ rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_srv_info_req_done);
+ if (!rx_iu)
+ return -ENOMEM;
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&con->c, rx_iu);
+ if (err) {
+ rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
+{
+ int i, err;
+
+ for (i = 0; i < q_size; i++) {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_path(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
+ size_t q_size;
+ int err, cid;
+
+ for (cid = 0; cid < srv_path->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = srv->queue_depth;
+
+ err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
+ if (err) {
+ rtrs_err(s, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void process_read(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_read *msg,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t usr_len, data_len;
+ void *data;
+ int ret;
+
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
+ rtrs_err_rl(s,
+ "Processing read request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(srv_path->state));
+ return;
+ }
+ if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
+ rtrs_err_rl(s,
+ "Processing read request failed, invalid message\n");
+ return;
+ }
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
+ id = srv_path->ops_ids[buf_id];
+ id->con = con;
+ id->dir = READ;
+ id->msg_id = buf_id;
+ id->rd_msg = msg;
+ usr_len = le16_to_cpu(msg->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
+ data + data_len, usr_len);
+
+ if (ret) {
+ rtrs_err_rl(s,
+			    "Processing read request failed, user module cb reported an error for msg_id %d, err: %d\n",
+ buf_id, ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_path(srv_path);
+ }
+ rtrs_srv_put_ops_ids(srv_path);
+}
+
+static void process_write(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_write *req,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t data_len, usr_len;
+ void *data;
+ int ret;
+
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
+ rtrs_err_rl(s,
+ "Processing write request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(srv_path->state));
+ return;
+ }
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
+ id = srv_path->ops_ids[buf_id];
+ id->con = con;
+ id->dir = WRITE;
+ id->msg_id = buf_id;
+
+ usr_len = le16_to_cpu(req->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
+ data + data_len, usr_len);
+ if (ret) {
+ rtrs_err_rl(s,
+ "Processing write request failed, user module callback reports err: %d\n",
+ ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_path(srv_path);
+ }
+ rtrs_srv_put_ops_ids(srv_path);
+}
+
+static void process_io_req(struct rtrs_srv_con *con, void *msg,
+ u32 id, u32 off)
+{
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_msg_rdma_hdr *hdr;
+ unsigned int type;
+
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
+ srv_path->dma_addr[id],
+ max_chunk_size, DMA_BIDIRECTIONAL);
+ hdr = msg;
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RTRS_MSG_WRITE:
+ process_write(con, msg, id, off);
+ break;
+ case RTRS_MSG_READ:
+ process_read(con, msg, id, off);
+ break;
+ default:
+ rtrs_err(s,
+ "Processing I/O request failed, unknown message type received: 0x%02x\n",
+ type);
+ goto err;
+ }
+
+ return;
+
+err:
+ close_path(srv_path);
+}
+
+static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_mr *mr =
+ container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ u32 msg_id, off;
+ void *data;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_path(srv_path);
+ }
+ msg_id = mr->msg_id;
+ off = mr->msg_off;
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+}
+
+static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
+ struct rtrs_srv_mr *mr)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &mr->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = mr->mr->rkey,
+ };
+ mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
+{
+ spin_lock(&con->rsp_wr_wait_lock);
+ while (!list_empty(&con->rsp_wr_wait_list)) {
+ struct rtrs_srv_op *id;
+ int ret;
+
+ id = list_entry(con->rsp_wr_wait_list.next,
+ struct rtrs_srv_op, wait_list);
+ list_del(&id->wait_list);
+
+ spin_unlock(&con->rsp_wr_wait_lock);
+ ret = rtrs_srv_resp_rdma(id, id->status);
+ spin_lock(&con->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&id->wait_list, &con->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&con->rsp_wr_wait_lock);
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ u32 imm_type, imm_payload;
+ int err;
+
+ if (wc->status != IB_WC_SUCCESS) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(s,
+ "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
+ ib_wc_status_msg(wc->status), wc->wr_cqe,
+ wc->opcode, wc->vendor_err, wc->byte_len);
+ close_path(srv_path);
+ }
+ return;
+ }
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
+ return;
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (err) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
+ close_path(srv_path);
+ break;
+ }
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (imm_type == RTRS_IO_REQ_IMM) {
+ u32 msg_id, off;
+ void *data;
+
+ msg_id = imm_payload >> srv_path->mem_bits;
+ off = imm_payload & ((1 << srv_path->mem_bits) - 1);
+ if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
+ rtrs_err(s, "Wrong msg_id %u, off %u\n",
+ msg_id, off);
+ close_path(srv_path);
+ return;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
+
+ mr->msg_off = off;
+ mr->msg_id = msg_id;
+ err = rtrs_srv_inv_rkey(con, mr);
+ if (err) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n",
+ err);
+ close_path(srv_path);
+ break;
+ }
+ } else {
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+ }
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&srv_path->s);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ srv_path->s.hb_missed_cnt = 0;
+ } else {
+ rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ case IB_WC_SEND:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb.
+ */
+ atomic_add(s->signal_interval, &con->c.sq_wr_avail);
+
+ if (!list_empty_careful(&con->rsp_wr_wait_list))
+ rtrs_rdma_process_wr_wait_list(con);
+
+ break;
+ default:
+ rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+/**
+ * rtrs_srv_get_path_name() - Get rtrs_srv peer path name.
+ * @srv: Session
+ * @pathname: Pathname buffer
+ * @len:	Length of pathname buffer
+ */
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
+ size_t len)
+{
+ struct rtrs_srv_path *srv_path;
+ int err = -ENOTCONN;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (srv_path->state != RTRS_SRV_CONNECTED)
+ continue;
+ strscpy(pathname, srv_path->s.sessname,
+ min_t(size_t, sizeof(srv_path->s.sessname), len));
+ err = 0;
+ break;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_srv_get_path_name);
+
+/**
+ * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
+ * @srv: Session
+ */
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
+{
+ return srv->queue_depth;
+}
+EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
+
+static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
+{
+ struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
+ int v;
+
+ v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
+ if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
+ v = cpumask_first(&cq_affinity_mask);
+ return v;
+}
+
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
+{
+ srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
+
+ return srv_path->cur_cq_vector;
+}
+
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
+ dev);
+
+ kfree(srv);
+}
+
+static void free_srv(struct rtrs_srv_sess *srv)
+{
+ int i;
+
+ WARN_ON(refcount_read(&srv->refcount));
+ for (i = 0; i < srv->queue_depth; i++)
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
+ kfree(srv->chunks);
+ mutex_destroy(&srv->paths_mutex);
+ mutex_destroy(&srv->paths_ev_mutex);
+ /* last put to release the srv structure */
+ put_device(&srv->dev);
+}
+
+static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid,
+ bool first_conn)
+{
+ struct rtrs_srv_sess *srv;
+ int i;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
+ refcount_inc_not_zero(&srv->refcount)) {
+ mutex_unlock(&ctx->srv_mutex);
+ return srv;
+ }
+ }
+ mutex_unlock(&ctx->srv_mutex);
+ /*
+ * If this request is not the first connection request from the
+ * client for this session then fail and return error.
+ */
+ if (!first_conn) {
+ pr_err_ratelimited("Error: Not the first connection request for this session\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ /* need to allocate a new srv */
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&srv->paths_list);
+ mutex_init(&srv->paths_mutex);
+ mutex_init(&srv->paths_ev_mutex);
+ uuid_copy(&srv->paths_uuid, paths_uuid);
+ srv->queue_depth = sess_queue_depth;
+ srv->ctx = ctx;
+ device_initialize(&srv->dev);
+ srv->dev.release = rtrs_srv_dev_release;
+
+ srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ GFP_KERNEL);
+ if (!srv->chunks)
+ goto err_free_srv;
+
+ for (i = 0; i < srv->queue_depth; i++) {
+ srv->chunks[i] = alloc_pages(GFP_KERNEL,
+ get_order(max_chunk_size));
+ if (!srv->chunks[i])
+ goto err_free_chunks;
+ }
+ refcount_set(&srv->refcount, 1);
+ mutex_lock(&ctx->srv_mutex);
+ list_add(&srv->ctx_list, &ctx->srv_list);
+ mutex_unlock(&ctx->srv_mutex);
+
+ return srv;
+
+err_free_chunks:
+ while (i--)
+ __free_pages(srv->chunks[i], get_order(max_chunk_size));
+ kfree(srv->chunks);
+
+err_free_srv:
+ kfree(srv);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void put_srv(struct rtrs_srv_sess *srv)
+{
+ if (refcount_dec_and_test(&srv->refcount)) {
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ WARN_ON(srv->dev.kobj.state_in_sysfs);
+
+ mutex_lock(&ctx->srv_mutex);
+ list_del(&srv->ctx_list);
+ mutex_unlock(&ctx->srv_mutex);
+ free_srv(srv);
+ }
+}
+
+static void __add_path_to_srv(struct rtrs_srv_sess *srv,
+ struct rtrs_srv_path *srv_path)
+{
+ list_add_tail(&srv_path->s.entry, &srv->paths_list);
+ srv->paths_num++;
+ WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
+}
+
+static void del_path_from_srv(struct rtrs_srv_path *srv_path)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+
+ if (WARN_ON(!srv))
+ return;
+
+ mutex_lock(&srv->paths_mutex);
+ list_del(&srv_path->s.entry);
+ WARN_ON(!srv->paths_num);
+ srv->paths_num--;
+ mutex_unlock(&srv->paths_mutex);
+}
+
+/* Return 0 if the addresses match, non-zero if they differ, -ENOENT on unknown family */
+static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
+{
+ switch (a->sa_family) {
+ case AF_IB:
+ return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
+ &((struct sockaddr_ib *)b)->sib_addr,
+ sizeof(struct ib_addr)) &&
+ (b->sa_family == AF_IB);
+ case AF_INET:
+ return memcmp(&((struct sockaddr_in *)a)->sin_addr,
+ &((struct sockaddr_in *)b)->sin_addr,
+ sizeof(struct in_addr)) &&
+ (b->sa_family == AF_INET);
+ case AF_INET6:
+ return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
+ &((struct sockaddr_in6 *)b)->sin6_addr,
+ sizeof(struct in6_addr)) &&
+ (b->sa_family == AF_INET6);
+ default:
+ return -ENOENT;
+ }
+}
+
+static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
+ struct rdma_addr *addr)
+{
+ struct rtrs_srv_path *srv_path;
+
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
+ (struct sockaddr *)&addr->dst_addr) &&
+ !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
+ (struct sockaddr *)&addr->src_addr))
+ return true;
+
+ return false;
+}
+
+static void free_path(struct rtrs_srv_path *srv_path)
+{
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->kobj);
+ kobject_put(&srv_path->kobj);
+ } else {
+ free_percpu(srv_path->stats->rdma_stats);
+ kfree(srv_path->stats);
+ kfree(srv_path);
+ }
+}
+
+static void rtrs_srv_close_work(struct work_struct *work)
+{
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_srv_con *con;
+ int i;
+
+ srv_path = container_of(work, typeof(*srv_path), close_work);
+
+ rtrs_srv_stop_hb(srv_path);
+
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
+ continue;
+ con = to_srv_con(srv_path->s.con[i]);
+ rdma_disconnect(con->c.cm_id);
+ ib_drain_qp(con->c.qp);
+ }
+
+ /*
+ * Degrade ref count to the usual model with a single shared
+ * atomic_t counter
+ */
+ percpu_ref_kill(&srv_path->ids_inflight_ref);
+
+ /* Wait for all completion */
+ wait_for_completion(&srv_path->complete_done);
+
+ rtrs_srv_destroy_path_files(srv_path);
+
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_path_down(srv_path);
+
+ unmap_cont_bufs(srv_path);
+ rtrs_srv_free_ops_ids(srv_path);
+
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
+ continue;
+ con = to_srv_con(srv_path->s.con[i]);
+ rtrs_cq_qp_destroy(&con->c);
+ rdma_destroy_id(con->c.cm_id);
+ kfree(con);
+ }
+ rtrs_ib_dev_put(srv_path->s.dev);
+
+ del_path_from_srv(srv_path);
+ put_srv(srv_path->srv);
+ srv_path->srv = NULL;
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
+
+ kfree(srv_path->dma_addr);
+ kfree(srv_path->s.con);
+ free_path(srv_path);
+}
+
+static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
+ struct rdma_cm_id *cm_id)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_msg_conn_rsp msg;
+ struct rdma_conn_param param;
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .queue_depth = cpu_to_le16(srv->queue_depth),
+ .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
+ .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
+ };
+
+ if (always_invalidate)
+ msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
+
+ err = rdma_accept(cm_id, &param);
+ if (err)
+ pr_err("rdma_accept(), err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
+{
+ struct rtrs_msg_conn_rsp msg;
+ int err;
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .errno = cpu_to_le16(errno),
+ };
+
+ err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
+ if (err)
+ pr_err("rdma_reject(), err: %d\n", err);
+
+ /* Bounce errno back */
+ return errno;
+}
+
+static struct rtrs_srv_path *
+__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
+{
+ struct rtrs_srv_path *srv_path;
+
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (uuid_equal(&srv_path->s.uuid, sess_uuid))
+ return srv_path;
+ }
+
+ return NULL;
+}
+
+static int create_con(struct rtrs_srv_path *srv_path,
+ struct rdma_cm_id *cm_id,
+ unsigned int cid)
+{
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
+ struct rtrs_srv_con *con;
+
+ u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
+ int err, cq_vector;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&con->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&con->rsp_wr_wait_list);
+ con->c.cm_id = cm_id;
+ con->c.path = &srv_path->s;
+ con->c.cid = cid;
+ atomic_set(&con->c.wr_cnt, 1);
+ wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
+
+ if (con->c.cid == 0) {
+ /*
+ * All receive and all send (each requiring invalidate)
+ * + 2 for drain and heartbeat
+ */
+ max_send_wr = min_t(int, wr_limit,
+ SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+ max_recv_wr = max_send_wr;
+ s->signal_interval = min_not_zero(srv->queue_depth,
+ (size_t)SERVICE_CON_QUEUE_DEPTH);
+ } else {
+		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
+ if (always_invalidate)
+ max_send_wr =
+ min_t(int, wr_limit,
+ srv->queue_depth * (1 + 4) + 1);
+ else
+ max_send_wr =
+ min_t(int, wr_limit,
+ srv->queue_depth * (1 + 2) + 1);
+
+ max_recv_wr = srv->queue_depth + 1;
+		/*
+		 * Worst-case budget: all receive requests posted, all
+		 * write requests posted, each read request needing an
+		 * invalidate request, plus one extra WR for draining
+		 * when the qp gets into the error state.
+		 */
+ }
+ cq_num = max_send_wr + max_recv_wr;
+ atomic_set(&con->c.sq_wr_avail, max_send_wr);
+ cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
+
+ /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
+ max_send_wr, max_recv_wr,
+ IB_POLL_WORKQUEUE);
+ if (err) {
+ rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ goto free_con;
+ }
+ if (con->c.cid == 0) {
+ err = post_recv_info_req(con);
+ if (err)
+ goto free_cqqp;
+ }
+ WARN_ON(srv_path->s.con[cid]);
+ srv_path->s.con[cid] = &con->c;
+
+ /*
+ * Change context from server to current connection. The other
+ * way is to use cm_id->qp->qp_context, which does not work on OFED.
+ */
+ cm_id->context = &con->c;
+
+ return 0;
+
+free_cqqp:
+ rtrs_cq_qp_destroy(&con->c);
+free_con:
+ kfree(con);
+
+err:
+ return err;
+}
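
The send-queue sizing above is the subtle part of this function: each IO
consumes the RDMA write itself plus four auxiliary send WRs when
always_invalidate is enabled (linv + rinv + mr + imm), or two otherwise,
and one extra WR is reserved for the heartbeat. The same budget as a
standalone sketch (hypothetical helper, for illustration only):

    static u32 io_con_max_send_wr(u32 wr_limit, size_t queue_depth,
                                  bool always_inval)
    {
            size_t per_io = 1 + (always_inval ? 4 : 2);

            return min_t(u32, wr_limit, queue_depth * per_io + 1);
    }
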
+
+static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
+ struct rdma_cm_id *cm_id,
+ unsigned int con_num,
+ unsigned int recon_cnt,
+ const uuid_t *uuid)
+{
+ struct rtrs_srv_path *srv_path;
+ int err = -ENOMEM;
+ char str[NAME_MAX];
+ struct rtrs_addr path;
+
+ if (srv->paths_num >= MAX_PATHS_NUM) {
+ err = -ECONNRESET;
+ goto err;
+ }
+ if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
+ err = -EEXIST;
+ pr_err("Path with same addr exists\n");
+ goto err;
+ }
+ srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
+ if (!srv_path)
+ goto err;
+
+ srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
+ if (!srv_path->stats)
+ goto err_free_sess;
+
+ srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
+ if (!srv_path->stats->rdma_stats)
+ goto err_free_stats;
+
+ srv_path->stats->srv_path = srv_path;
+
+ srv_path->dma_addr = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->dma_addr),
+ GFP_KERNEL);
+ if (!srv_path->dma_addr)
+ goto err_free_percpu;
+
+ srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
+ GFP_KERNEL);
+ if (!srv_path->s.con)
+ goto err_free_dma_addr;
+
+ srv_path->state = RTRS_SRV_CONNECTING;
+ srv_path->srv = srv;
+ srv_path->cur_cq_vector = -1;
+ srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
+ srv_path->s.src_addr = cm_id->route.addr.src_addr;
+
+ /* temporary until receiving session-name from client */
+ path.src = &srv_path->s.src_addr;
+ path.dst = &srv_path->s.dst_addr;
+ rtrs_addr_to_str(&path, str, sizeof(str));
+ strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
+
+ srv_path->s.con_num = con_num;
+ srv_path->s.irq_con_num = con_num;
+ srv_path->s.recon_cnt = recon_cnt;
+ uuid_copy(&srv_path->s.uuid, uuid);
+ spin_lock_init(&srv_path->state_lock);
+ INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(srv_path);
+
+ srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!srv_path->s.dev) {
+ err = -ENOMEM;
+ goto err_free_con;
+ }
+ err = map_cont_bufs(srv_path);
+ if (err)
+ goto err_put_dev;
+
+ err = rtrs_srv_alloc_ops_ids(srv_path);
+ if (err)
+ goto err_unmap_bufs;
+
+ __add_path_to_srv(srv, srv_path);
+
+ return srv_path;
+
+err_unmap_bufs:
+ unmap_cont_bufs(srv_path);
+err_put_dev:
+ rtrs_ib_dev_put(srv_path->s.dev);
+err_free_con:
+ kfree(srv_path->s.con);
+err_free_dma_addr:
+ kfree(srv_path->dma_addr);
+err_free_percpu:
+ free_percpu(srv_path->stats->rdma_stats);
+err_free_stats:
+ kfree(srv_path->stats);
+err_free_sess:
+ kfree(srv_path);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ const struct rtrs_msg_conn_req *msg,
+ size_t len)
+{
+ struct rtrs_srv_ctx *ctx = cm_id->context;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_srv_sess *srv;
+
+ u16 version, con_num, cid;
+ u16 recon_cnt;
+ int err = -ECONNRESET;
+
+ if (len < sizeof(*msg)) {
+ pr_err("Invalid RTRS connection request\n");
+ goto reject_w_err;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ pr_err("Invalid RTRS magic\n");
+ goto reject_w_err;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ pr_err("Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ goto reject_w_err;
+ }
+ con_num = le16_to_cpu(msg->cid_num);
+ if (con_num > 4096) {
+ /* Sanity check */
+ pr_err("Too many connections requested: %d\n", con_num);
+ goto reject_w_err;
+ }
+ cid = le16_to_cpu(msg->cid);
+ if (cid >= con_num) {
+ /* Sanity check */
+ pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
+ goto reject_w_err;
+ }
+ recon_cnt = le16_to_cpu(msg->recon_cnt);
+ srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
+ if (IS_ERR(srv)) {
+ err = PTR_ERR(srv);
+ pr_err("get_or_create_srv(), error %d\n", err);
+ goto reject_w_err;
+ }
+ mutex_lock(&srv->paths_mutex);
+ srv_path = __find_path(srv, &msg->sess_uuid);
+ if (srv_path) {
+ struct rtrs_path *s = &srv_path->s;
+
+ /* Session already holds a reference */
+ put_srv(srv);
+
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Session in wrong state: %s\n",
+ rtrs_srv_state_str(srv_path->state));
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_err;
+ }
+ /*
+ * Sanity checks
+ */
+ if (con_num != s->con_num || cid >= s->con_num) {
+ rtrs_err(s, "Incorrect request: %d, %d\n",
+ cid, con_num);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_err;
+ }
+ if (s->con[cid]) {
+ rtrs_err(s, "Connection already exists: %d\n",
+ cid);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_err;
+ }
+ } else {
+ srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
+ &msg->sess_uuid);
+ if (IS_ERR(srv_path)) {
+ mutex_unlock(&srv->paths_mutex);
+ put_srv(srv);
+ err = PTR_ERR(srv_path);
+ pr_err("RTRS server session allocation failed: %d\n", err);
+ goto reject_w_err;
+ }
+ }
+ err = create_con(srv_path, cm_id, cid);
+ if (err) {
+ rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
+ rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the session has other connections we follow the normal
+ * path through the workqueue, but still return an error to tell
+ * cma.c to call rdma_destroy_id() for the current connection.
+ */
+ goto close_and_return_err;
+ }
+ err = rtrs_rdma_do_accept(srv_path, cm_id);
+ if (err) {
+ rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
+ rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the current connection was successfully added to the
+ * session we follow the normal path through the workqueue to
+ * close the session, and thus return 0 to tell cma.c that we
+ * call rdma_destroy_id() ourselves.
+ */
+ err = 0;
+ goto close_and_return_err;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return 0;
+
+reject_w_err:
+ return rtrs_rdma_do_reject(cm_id, err);
+
+close_and_return_err:
+ mutex_unlock(&srv->paths_mutex);
+ close_path(srv_path);
+
+ return err;
+}
+
+static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_srv_path *srv_path = NULL;
+ struct rtrs_path *s = NULL;
+
+ if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
+ struct rtrs_con *c = cm_id->context;
+
+ s = c->path;
+ srv_path = to_srv_path(s);
+ }
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ /*
+ * In case of error cma.c will destroy cm_id,
+ * see cma_process_remove()
+ */
+ return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
+ ev->param.conn.private_data_len);
+ case RDMA_CM_EVENT_ESTABLISHED:
+ /* Nothing here */
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ fallthrough;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ close_path(srv_path);
+ break;
+ default:
+ pr_err("Ignoring unexpected CM event %s, err %d\n",
+ rdma_event_msg(ev->event), ev->status);
+ break;
+ }
+
+ return 0;
+}
+
+static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
+ struct sockaddr *addr,
+ enum rdma_ucm_port_space ps)
+{
+ struct rdma_cm_id *cm_id;
+ int ret;
+
+ cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
+ ctx, ps, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ pr_err("Creating id for RDMA connection failed, err: %d\n",
+ ret);
+ goto err_out;
+ }
+ ret = rdma_bind_addr(cm_id, addr);
+ if (ret) {
+ pr_err("Binding RDMA address failed, err: %d\n", ret);
+ goto err_cm;
+ }
+ ret = rdma_listen(cm_id, 64);
+ if (ret) {
+ pr_err("Listening on RDMA connection failed, err: %d\n",
+ ret);
+ goto err_cm;
+ }
+
+ return cm_id;
+
+err_cm:
+ rdma_destroy_id(cm_id);
+err_out:
+
+ return ERR_PTR(ret);
+}
+
+static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
+{
+ struct sockaddr_in6 sin = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ .sin6_port = htons(port),
+ };
+ struct sockaddr_ib sib = {
+ .sib_family = AF_IB,
+ .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
+ .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
+ .sib_pkey = cpu_to_be16(0xffff),
+ };
+ struct rdma_cm_id *cm_ip, *cm_ib;
+ int ret;
+
+ /*
+ * We accept both IPoIB and IB connections, so we need to keep
+ * two CM IDs, one for each socket type and port space.
+ * If the CM initialization of one of the IDs fails, we abort
+ * everything.
+ */
+ cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
+ if (IS_ERR(cm_ip))
+ return PTR_ERR(cm_ip);
+
+ cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
+ if (IS_ERR(cm_ib)) {
+ ret = PTR_ERR(cm_ib);
+ goto free_cm_ip;
+ }
+
+ ctx->cm_id_ip = cm_ip;
+ ctx->cm_id_ib = cm_ib;
+
+ return 0;
+
+free_cm_ip:
+ rdma_destroy_id(cm_ip);
+
+ return ret;
+}
+
+static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->ops = *ops;
+ mutex_init(&ctx->srv_mutex);
+ INIT_LIST_HEAD(&ctx->srv_list);
+
+ return ctx;
+}
+
+static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+{
+ WARN_ON(!list_empty(&ctx->srv_list));
+ mutex_destroy(&ctx->srv_mutex);
+ kfree(ctx);
+}
+
+static int rtrs_srv_add_one(struct ib_device *device)
+{
+ struct rtrs_srv_ctx *ctx;
+ int ret = 0;
+
+ mutex_lock(&ib_ctx.ib_dev_mutex);
+ if (ib_ctx.ib_dev_count)
+ goto out;
+
+ /*
+ * Since our CM IDs are NOT bound to any ib device we will create them
+ * only once
+ */
+ ctx = ib_ctx.srv_ctx;
+ ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
+ if (ret) {
+ /*
+ * We errored out here. According to the IB core code the error
+ * code returned here is ignored, and no more calls to our ops
+ * are made.
+ */
+ pr_err("Failed to initialize RDMA connection\n");
+ goto err_out;
+ }
+
+out:
+ /*
+ * Keep track of the number of ib devices added
+ */
+ ib_ctx.ib_dev_count++;
+
+err_out:
+ mutex_unlock(&ib_ctx.ib_dev_mutex);
+ return ret;
+}
+
+static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ mutex_lock(&ib_ctx.ib_dev_mutex);
+ ib_ctx.ib_dev_count--;
+
+ if (ib_ctx.ib_dev_count)
+ goto out;
+
+ /*
+ * Since our CM IDs are NOT bound to any ib device we will remove them
+ * only once, when the last device is removed
+ */
+ ctx = ib_ctx.srv_ctx;
+ rdma_destroy_id(ctx->cm_id_ip);
+ rdma_destroy_id(ctx->cm_id_ib);
+
+out:
+ mutex_unlock(&ib_ctx.ib_dev_mutex);
+}
+
+static struct ib_client rtrs_srv_client = {
+ .name = "rtrs_server",
+ .add = rtrs_srv_add_one,
+ .remove = rtrs_srv_remove_one
+};
+
+/**
+ * rtrs_srv_open() - open RTRS server context
+ * @ops: callback functions
+ * @port: port to listen on
+ *
+ * Creates server context with specified callbacks.
+ *
+ * Returns a valid pointer on success, otherwise an ERR_PTR-encoded error.
+ */
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+{
+ struct rtrs_srv_ctx *ctx;
+ int err;
+
+ ctx = alloc_srv_ctx(ops);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&ib_ctx.ib_dev_mutex);
+ ib_ctx.srv_ctx = ctx;
+ ib_ctx.port = port;
+
+ err = ib_register_client(&rtrs_srv_client);
+ if (err) {
+ free_srv_ctx(ctx);
+ return ERR_PTR(err);
+ }
+
+ return ctx;
+}
+EXPORT_SYMBOL(rtrs_srv_open);
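
A minimal sketch of how a ULP might consume this API; the callback bodies,
names and port number are hypothetical, only the signatures come from
rtrs.h:

    static int my_rdma_ev(void *priv, struct rtrs_srv_op *id, void *data,
                          size_t datalen, const void *usr, size_t usrlen)
    {
            /* handle the IO, then complete it towards the client */
            rtrs_srv_resp_rdma(id, 0);
            return 0;
    }

    static int my_link_ev(struct rtrs_srv_sess *sess,
                          enum rtrs_srv_link_ev ev, void *priv)
    {
            return 0;       /* accept all sessions */
    }

    static struct rtrs_srv_ops my_ops = {
            .rdma_ev = my_rdma_ev,
            .link_ev = my_link_ev,
    };

    static struct rtrs_srv_ctx *my_srv_ctx;

    static int my_ulp_start(void)
    {
            my_srv_ctx = rtrs_srv_open(&my_ops, 1234);
            return PTR_ERR_OR_ZERO(my_srv_ctx);
    }
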
+
+static void close_paths(struct rtrs_srv_sess *srv)
+{
+ struct rtrs_srv_path *srv_path;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ close_path(srv_path);
+ mutex_unlock(&srv->paths_mutex);
+}
+
+static void close_ctx(struct rtrs_srv_ctx *ctx)
+{
+ struct rtrs_srv_sess *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list)
+ close_paths(srv);
+ mutex_unlock(&ctx->srv_mutex);
+ flush_workqueue(rtrs_wq);
+}
+
+/**
+ * rtrs_srv_close() - close RTRS server context
+ * @ctx: pointer to server context
+ *
+ * Closes RTRS server context with all client sessions.
+ */
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+{
+ ib_unregister_client(&rtrs_srv_client);
+ mutex_destroy(&ib_ctx.ib_dev_mutex);
+ close_ctx(ctx);
+ free_srv_ctx(ctx);
+}
+EXPORT_SYMBOL(rtrs_srv_close);
+
+static int check_module_params(void)
+{
+ if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
+ sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+ if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
+ pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
+ max_chunk_size, MIN_CHUNK_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and the
+ * offset inside the memory chunk
+ */
+ if ((ilog2(sess_queue_depth - 1) + 1) +
+ (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
+ pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
+ MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
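
For a concrete feel of this constraint, a sketch assuming the mainline
defaults of sess_queue_depth == 512 and max_chunk_size == 128 KiB (the
helper is hypothetical):

    /* id bits + in-chunk offset bits, as in check_module_params() */
    static inline int imm_payload_bits(int queue_depth, int chunk_size)
    {
            return (ilog2(queue_depth - 1) + 1) +
                   (ilog2(chunk_size - 1) + 1);
    }

    /* imm_payload_bits(512, 128 << 10) == 9 + 17 == 26 bits */
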
+
+static int __init rtrs_server_init(void)
+{
+ int err;
+
+ pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
+ KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
+ max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
+ sess_queue_depth, always_invalidate);
+
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ err = check_module_params();
+ if (err) {
+ pr_err("Failed to load module, invalid module parameters, err: %d\n",
+ err);
+ return err;
+ }
+ rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
+ if (IS_ERR(rtrs_dev_class)) {
+ err = PTR_ERR(rtrs_dev_class);
+ goto out_err;
+ }
+ rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
+ if (!rtrs_wq) {
+ err = -ENOMEM;
+ goto out_dev_class;
+ }
+
+ return 0;
+
+out_dev_class:
+ class_destroy(rtrs_dev_class);
+out_err:
+ return err;
+}
+
+static void __exit rtrs_server_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_server_init);
+module_exit(rtrs_server_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
new file mode 100644
index 000000000..2f8a638e3
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_SRV_H
+#define RTRS_SRV_H
+
+#include <linux/device.h>
+#include <linux/refcount.h>
+#include <linux/percpu.h>
+#include "rtrs-pri.h"
+
+/*
+ * enum rtrs_srv_state - Server states.
+ */
+enum rtrs_srv_state {
+ RTRS_SRV_CONNECTING,
+ RTRS_SRV_CONNECTED,
+ RTRS_SRV_CLOSING,
+ RTRS_SRV_CLOSED,
+};
+
+/* Stats for read and write operations.
+ * See Documentation/ABI/testing/sysfs-class-rtrs-server for details.
+ */
+struct rtrs_srv_stats_rdma_stats {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+};
+
+struct rtrs_srv_stats {
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats __percpu *rdma_stats;
+ struct rtrs_srv_path *srv_path;
+};
+
+struct rtrs_srv_con {
+ struct rtrs_con c;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+};
+
+/* IO context in rtrs_srv, each io has one */
+struct rtrs_srv_op {
+ struct rtrs_srv_con *con;
+ u32 msg_id;
+ u8 dir;
+ struct rtrs_msg_rdma_read *rd_msg;
+ struct ib_rdma_wr tx_wr;
+ struct ib_sge tx_sg;
+ struct list_head wait_list;
+ int status;
+};
+
+/*
+ * Server-side memory region context: when always_invalidate=Y we need
+ * queue_depth memory regions so that each one can be invalidated
+ * individually.
+ */
+struct rtrs_srv_mr {
+ struct ib_mr *mr;
+ struct sg_table sgt;
+ struct ib_cqe inv_cqe; /* only for always_invalidate=true */
+ u32 msg_id; /* only for always_invalidate=true */
+ u32 msg_off; /* only for always_invalidate=true */
+ struct rtrs_iu *iu; /* send buffer for new rkey msg */
+};
+
+struct rtrs_srv_path {
+ struct rtrs_path s;
+ struct rtrs_srv_sess *srv;
+ struct work_struct close_work;
+ enum rtrs_srv_state state;
+ spinlock_t state_lock;
+ int cur_cq_vector;
+ struct rtrs_srv_op **ops_ids;
+ struct percpu_ref ids_inflight_ref;
+ struct completion complete_done;
+ struct rtrs_srv_mr *mrs;
+ unsigned int mrs_num;
+ dma_addr_t *dma_addr;
+ bool established;
+ unsigned int mem_bits;
+ struct kobject kobj;
+ struct rtrs_srv_stats *stats;
+};
+
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
+{
+ return container_of(s, struct rtrs_srv_path, s);
+}
+
+struct rtrs_srv_sess {
+ struct list_head paths_list;
+ int paths_up;
+ struct mutex paths_ev_mutex;
+ size_t paths_num;
+ struct mutex paths_mutex;
+ uuid_t paths_uuid;
+ refcount_t refcount;
+ struct rtrs_srv_ctx *ctx;
+ struct list_head ctx_list;
+ void *priv;
+ size_t queue_depth;
+ struct page **chunks;
+ struct device dev;
+ unsigned int dev_ref;
+ struct kobject *kobj_paths;
+};
+
+struct rtrs_srv_ctx {
+ struct rtrs_srv_ops ops;
+ struct rdma_cm_id *cm_id_ip;
+ struct rdma_cm_id *cm_id_ib;
+ struct mutex srv_mutex;
+ struct list_head srv_list;
+};
+
+struct rtrs_srv_ib_ctx {
+ struct rtrs_srv_ctx *srv_ctx;
+ u16 port;
+ struct mutex ib_dev_mutex;
+ int ib_dev_count;
+};
+
+extern struct class *rtrs_dev_class;
+
+void close_path(struct rtrs_srv_path *srv_path);
+
+static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
+ size_t size, int d)
+{
+ this_cpu_inc(s->rdma_stats->dir[d].cnt);
+ this_cpu_add(s->rdma_stats->dir[d].size_total, size);
+}
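
Reading those per-CPU counters back means summing across CPUs; a minimal
sketch of what a sysfs show path might do (the helper below is
hypothetical, the real one lives in rtrs-srv-stats.c):

    static inline u64 rtrs_srv_stats_dir_cnt(struct rtrs_srv_stats *s, int d)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu_ptr(s->rdma_stats, cpu)->dir[d].cnt;

            return sum;
    }
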
+
+/* functions which are implemented in rtrs-srv-stats.c */
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page);
+int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+
+/* functions which are implemented in rtrs-srv-sysfs.c */
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);
+
+#endif /* RTRS_SRV_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
new file mode 100644
index 000000000..716ec7bad
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/inet.h>
+
+#include "rtrs-pri.h"
+#include "rtrs-log.h"
+
+MODULE_DESCRIPTION("RDMA Transport Core");
+MODULE_LICENSE("GPL");
+
+struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
+ struct ib_device *dma_dev,
+ enum dma_data_direction dir,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc))
+{
+ struct rtrs_iu *ius, *iu;
+ int i;
+
+ ius = kcalloc(iu_num, sizeof(*ius), gfp_mask);
+ if (!ius)
+ return NULL;
+ for (i = 0; i < iu_num; i++) {
+ iu = &ius[i];
+ iu->direction = dir;
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto err;
+
+ iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+ if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
+ kfree(iu->buf);
+ goto err;
+ }
+
+ iu->cqe.done = done;
+ iu->size = size;
+ }
+ return ius;
+err:
+ rtrs_iu_free(ius, dma_dev, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
+
+void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_num)
+{
+ struct rtrs_iu *iu;
+ int i;
+
+ if (!ius)
+ return;
+
+ for (i = 0; i < queue_num; i++) {
+ iu = &ius[i];
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
+ kfree(iu->buf);
+ }
+ kfree(ius);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_free);
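
A short usage sketch for the IU pair above; the ring size, message size
and completion handler are the caller's (hypothetical wrapper):

    static struct rtrs_iu *my_alloc_recv_ring(struct rtrs_con *con,
                                              u32 q_size, size_t msg_size,
                                              void (*done)(struct ib_cq *cq,
                                                           struct ib_wc *wc))
    {
            /* one IU per expected receive; release with rtrs_iu_free() */
            return rtrs_iu_alloc(q_size, msg_size, GFP_KERNEL,
                                 con->path->dev->ib_dev, DMA_FROM_DEVICE,
                                 done);
    }
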
+
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
+{
+ struct rtrs_path *path = con->path;
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+
+ list.addr = iu->dma_addr;
+ list.length = iu->size;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
+
+ if (list.length == 0) {
+ rtrs_wrn(con->path,
+ "Posting receive work request failed, sg list is empty\n");
+ return -EINVAL;
+ }
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr;
+
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = cqe,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
+
+static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
+ struct ib_send_wr *wr, struct ib_send_wr *tail)
+{
+ if (head) {
+ struct ib_send_wr *next = head;
+
+ while (next->next)
+ next = next->next;
+ next->next = wr;
+ } else {
+ head = wr;
+ }
+
+ if (tail)
+ wr->next = tail;
+
+ return ib_post_send(qp, head, NULL);
+}
+
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head)
+{
+ struct rtrs_path *path = con->path;
+ struct ib_send_wr wr;
+ struct ib_sge list;
+
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
+ list.addr = iu->dma_addr;
+ list.length = size;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ };
+
+ return rtrs_post_send(con->qp, head, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
+
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head,
+ struct ib_send_wr *tail)
+{
+ struct ib_rdma_wr wr;
+ int i;
+
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = &iu->cqe,
+ .wr.sg_list = sge,
+ .wr.num_sge = num_sge,
+ .rkey = rkey,
+ .remote_addr = rdma_addr,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ .wr.send_flags = flags,
+ };
+
+ /*
+ * If one of the sges has 0 size, the operation will fail with a
+ * length error
+ */
+ for (i = 0; i < num_sge; i++)
+ if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
+ return -EINVAL;
+
+ return rtrs_post_send(con->qp, head, &wr.wr, tail);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
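
The head/tail parameters let a caller splice additional work requests
around the write. A hedged sketch of chaining a local-invalidate WR in
front of the write (illustrative only; not what the server actually
posts):

    static int write_imm_with_local_inv(struct rtrs_con *con,
                                        struct rtrs_iu *iu,
                                        struct ib_sge *sge,
                                        unsigned int num_sge, u32 rkey,
                                        u64 raddr, u32 imm,
                                        struct ib_cqe *inv_cqe)
    {
            struct ib_send_wr inv_wr = {
                    .opcode = IB_WR_LOCAL_INV,
                    .wr_cqe = inv_cqe,
                    .ex.invalidate_rkey = rkey,
            };

            /* inv_wr is posted first, then the write with immediate */
            return rtrs_iu_post_rdma_write_imm(con, iu, sge, num_sge, rkey,
                                               raddr, imm, 0, &inv_wr, NULL);
    }
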
+
+static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
+ struct ib_cqe *cqe,
+ u32 imm_data,
+ struct ib_send_wr *head)
+{
+ struct ib_rdma_wr wr;
+ struct rtrs_path *path = con->path;
+ enum ib_send_flags sflags;
+
+ atomic_dec_if_positive(&con->sq_wr_avail);
+ sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
+ 0 : IB_SEND_SIGNALED;
+
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = cqe,
+ .wr.send_flags = sflags,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ };
+
+ return rtrs_post_send(con->qp, head, &wr.wr, NULL);
+}
+
+static void qp_event_handler(struct ib_event *ev, void *ctx)
+{
+ struct rtrs_con *con = ctx;
+
+ switch (ev->event) {
+ case IB_EVENT_COMM_EST:
+ rtrs_info(con->path, "QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
+ break;
+ default:
+ rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ break;
+ }
+}
+
+static bool is_pollqueue(struct rtrs_con *con)
+{
+ return con->cid >= con->path->irq_con_num;
+}
+
+static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
+ enum ib_poll_context poll_ctx)
+{
+ struct rdma_cm_id *cm_id = con->cm_id;
+ struct ib_cq *cq;
+
+ if (is_pollqueue(con))
+ cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
+ poll_ctx);
+ else
+ cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
+
+ if (IS_ERR(cq)) {
+ rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
+ PTR_ERR(cq));
+ return PTR_ERR(cq);
+ }
+ con->cq = cq;
+ con->nr_cqe = nr_cqe;
+
+ return 0;
+}
+
+static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
+{
+ struct ib_qp_init_attr init_attr = {NULL};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+ init_attr.cap.max_send_wr = max_send_wr;
+ init_attr.cap.max_recv_wr = max_recv_wr;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.event_handler = qp_event_handler;
+ init_attr.qp_context = con;
+ init_attr.cap.max_send_sge = max_sge;
+
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = con->cq;
+ init_attr.recv_cq = con->cq;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+
+ ret = rdma_create_qp(cm_id, pd, &init_attr);
+ if (ret) {
+ rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
+ return ret;
+ }
+ con->qp = cm_id->qp;
+
+ return ret;
+}
+
+static void destroy_cq(struct rtrs_con *con)
+{
+ if (con->cq) {
+ if (is_pollqueue(con))
+ ib_free_cq(con->cq);
+ else
+ ib_cq_pool_put(con->cq, con->nr_cqe);
+ }
+ con->cq = NULL;
+}
+
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, int nr_cqe,
+ u32 max_send_wr, u32 max_recv_wr,
+ enum ib_poll_context poll_ctx)
+{
+ int err;
+
+ err = create_cq(con, cq_vector, nr_cqe, poll_ctx);
+ if (err)
+ return err;
+
+ err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
+ max_send_sge);
+ if (err) {
+ destroy_cq(con);
+ return err;
+ }
+ con->path = path;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
+
+void rtrs_cq_qp_destroy(struct rtrs_con *con)
+{
+ if (con->qp) {
+ rdma_destroy_qp(con->cm_id);
+ con->qp = NULL;
+ }
+ destroy_cq(con);
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
+
+static void schedule_hb(struct rtrs_path *path)
+{
+ queue_delayed_work(path->hb_wq, &path->hb_dwork,
+ msecs_to_jiffies(path->hb_interval_ms));
+}
+
+void rtrs_send_hb_ack(struct rtrs_path *path)
+{
+ struct rtrs_con *usr_con = path->con[0];
+ u32 imm;
+ int err;
+
+ imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
+ NULL);
+ if (err) {
+ rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
+
+static void hb_work(struct work_struct *work)
+{
+ struct rtrs_con *usr_con;
+ struct rtrs_path *path;
+ u32 imm;
+ int err;
+
+ path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
+ usr_con = path->con[0];
+
+ if (path->hb_missed_cnt > path->hb_missed_max) {
+ rtrs_err(path, "HB missed max reached.\n");
+ path->hb_err_handler(usr_con);
+ return;
+ }
+ if (path->hb_missed_cnt++) {
+ /* Reschedule work without sending hb */
+ schedule_hb(path);
+ return;
+ }
+
+ path->hb_last_sent = ktime_get();
+
+ imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
+ NULL);
+ if (err) {
+ rtrs_err(path, "HB send failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
+ return;
+ }
+
+ schedule_hb(path);
+}
+
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq)
+{
+ path->hb_cqe = cqe;
+ path->hb_interval_ms = interval_ms;
+ path->hb_err_handler = err_handler;
+ path->hb_wq = wq;
+ path->hb_missed_max = missed_max;
+ path->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
+}
+EXPORT_SYMBOL_GPL(rtrs_init_hb);
+
+void rtrs_start_hb(struct rtrs_path *path)
+{
+ schedule_hb(path);
+}
+EXPORT_SYMBOL_GPL(rtrs_start_hb);
+
+void rtrs_stop_hb(struct rtrs_path *path)
+{
+ cancel_delayed_work_sync(&path->hb_dwork);
+ path->hb_missed_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_stop_hb);
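
A sketch of how a path owner wires the heartbeat up; the interval, the
missed-max threshold and the error handler body are hypothetical:

    static void my_hb_err(struct rtrs_con *con)
    {
            /* the path is considered dead; schedule its teardown here */
    }

    static void my_start_hb(struct rtrs_path *path, struct ib_cqe *hb_cqe,
                            struct workqueue_struct *wq)
    {
            rtrs_init_hb(path, hb_cqe, 5000 /* ms */, 5, my_hb_err, wq);
            rtrs_start_hb(path);
    }
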
+
+static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
+ short port, struct sockaddr_storage *dst)
+{
+ struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
+ int ret;
+
+ /*
+ * We can use some of the IPv6 functions since GID is a valid
+ * IPv6 address format
+ */
+ ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
+ if (ret == 0)
+ return -EINVAL;
+
+ dst_ib->sib_family = AF_IB;
+ /*
+ * Use the same TCP server port number as the IB service ID
+ * on the IB port space range
+ */
+ dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
+ dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ dst_ib->sib_pkey = cpu_to_be16(0xffff);
+
+ return 0;
+}
+
+/**
+ * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
+ * @addr: String representation of an addr (IPv4, IPv6 or IB GID):
+ * - "ip:192.168.1.1"
+ * - "ip:fe80::200:5aee:feaa:20a2"
+ * - "gid:fe80::200:5aee:feaa:20a2"
+ * @len: String address length
+ * @port: Destination port
+ * @dst: Destination sockaddr structure
+ *
+ * Returns 0 if conversion successful. Non-zero on error.
+ */
+static int rtrs_str_to_sockaddr(const char *addr, size_t len,
+ u16 port, struct sockaddr_storage *dst)
+{
+ if (strncmp(addr, "gid:", 4) == 0) {
+ return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
+ } else if (strncmp(addr, "ip:", 3) == 0) {
+ char port_str[8];
+ char *cpy;
+ int err;
+
+ snprintf(port_str, sizeof(port_str), "%u", port);
+ cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
+ err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
+ cpy, port_str, dst) : -ENOMEM;
+ kfree(cpy);
+
+ return err;
+ }
+ return -EPROTONOSUPPORT;
+}
+
+/**
+ * sockaddr_to_str() - convert sockaddr to a string.
+ * @addr: the sockaddr structure to be converted.
+ * @buf: string containing socket addr.
+ * @len: string length.
+ *
+ * The return value is the number of characters written into buf not
+ * including the trailing '\0'. If len == 0 the function returns 0.
+ */
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
+{
+ switch (addr->sa_family) {
+ case AF_IB:
+ return scnprintf(buf, len, "gid:%pI6",
+ &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
+ case AF_INET:
+ return scnprintf(buf, len, "ip:%pI4",
+ &((struct sockaddr_in *)addr)->sin_addr);
+ case AF_INET6:
+ return scnprintf(buf, len, "ip:%pI6c",
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
+ }
+ return scnprintf(buf, len, "<invalid address family>");
+}
+EXPORT_SYMBOL(sockaddr_to_str);
+
+/**
+ * rtrs_addr_to_str() - convert rtrs_addr to a string "src@dst"
+ * @addr: the rtrs_addr structure to be converted
+ * @buf: string containing source and destination addr of a path
+ *	  separated by '@', e.g. "ip:1.1.1.1@ip:1.1.1.2".
+ * @len: string length
+ *
+ * The return value is the number of characters written into buf not
+ * including the trailing '\0'.
+ */
+int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len)
+{
+ int cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)addr->src,
+ buf, len);
+ cnt += scnprintf(buf + cnt, len - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)addr->dst,
+ buf + cnt, len - cnt);
+ return cnt;
+}
+EXPORT_SYMBOL(rtrs_addr_to_str);
+
+/**
+ * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
+ * to sockaddresses
+ * @str: string containing source and destination addr of a path
+ *	separated by ',' or '@', e.g. "ip:1.1.1.1,ip:1.1.1.2" or
+ *	"ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it is
+ *	considered to be the destination.
+ * @len: string length
+ * @port: Destination port number.
+ * @addr: will be set to the source/destination address or to NULL
+ * if str doesn't contain any source address.
+ *
+ * Returns zero if conversion successful. Non-zero otherwise.
+ */
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr)
+{
+ const char *d;
+
+ d = strchr(str, ',');
+ if (!d)
+ d = strchr(str, '@');
+ if (d) {
+ if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
+ return -EINVAL;
+ d += 1;
+ len -= d - str;
+ str = d;
+
+ } else {
+ addr->src = NULL;
+ }
+ return rtrs_str_to_sockaddr(str, len, port, addr->dst);
+}
+EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
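
Usage sketch (hypothetical wrapper): parsing a "src@dst" string, e.g. a
module parameter, into caller-provided storage:

    static int my_parse_path(const char *str, u16 port,
                             struct sockaddr_storage *src,
                             struct sockaddr_storage *dst)
    {
            struct rtrs_addr addr = {
                    .src = src,
                    .dst = dst,
            };

            /* addr.src is set to NULL if str carries no source part */
            return rtrs_addr_to_sockaddr(str, strlen(str), port, &addr);
    }
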
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free));
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->mutex);
+ pool->pd_flags = pd_flags;
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
+
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
+{
+ mutex_destroy(&pool->mutex);
+ WARN_ON(!list_empty(&pool->list));
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
+
+static void dev_free(struct kref *ref)
+{
+ struct rtrs_rdma_dev_pd *pool;
+ struct rtrs_ib_dev *dev;
+
+ dev = container_of(ref, typeof(*dev), ref);
+ pool = dev->pool;
+
+ mutex_lock(&pool->mutex);
+ list_del(&dev->entry);
+ mutex_unlock(&pool->mutex);
+
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
+ ib_dealloc_pd(dev->ib_pd);
+
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+}
+
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
+{
+ return kref_put(&dev->ref, dev_free);
+}
+EXPORT_SYMBOL(rtrs_ib_dev_put);
+
+static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+struct rtrs_ib_dev *
+rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ struct rtrs_ib_dev *dev;
+
+ mutex_lock(&pool->mutex);
+ list_for_each_entry(dev, &pool->list, entry) {
+ if (dev->ib_dev->node_guid == ib_dev->node_guid &&
+ rtrs_ib_dev_get(dev))
+ goto out_unlock;
+ }
+ mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->alloc)
+ dev = pool->ops->alloc();
+ else
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(dev))
+ goto out_err;
+
+ kref_init(&dev->ref);
+ dev->pool = pool;
+ dev->ib_dev = ib_dev;
+ dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
+ if (IS_ERR(dev->ib_pd))
+ goto out_free_dev;
+
+ if (pool->ops && pool->ops->init && pool->ops->init(dev))
+ goto out_free_pd;
+
+ mutex_lock(&pool->mutex);
+ list_add(&dev->entry, &pool->list);
+out_unlock:
+ mutex_unlock(&pool->mutex);
+ return dev;
+
+out_free_pd:
+ ib_dealloc_pd(dev->ib_pd);
+out_free_dev:
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);
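
The pool is keyed on the device node GUID, so multiple connections on the
same HCA share one PD. A hedged usage sketch (hypothetical caller):

    static int my_use_ib_dev(struct rdma_cm_id *cm_id,
                             struct rtrs_rdma_dev_pd *pool)
    {
            struct rtrs_ib_dev *dev;

            dev = rtrs_ib_dev_find_or_add(cm_id->device, pool);
            if (!dev)
                    return -ENOMEM;

            /* ... use dev->ib_pd and dev->ib_dev while holding the ref ... */

            rtrs_ib_dev_put(dev);   /* the last put frees PD and entry */
            return 0;
    }
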
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
new file mode 100644
index 000000000..b48b53a7c
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_H
+#define RTRS_H
+
+#include <linux/socket.h>
+#include <linux/scatterlist.h>
+
+struct rtrs_permit;
+struct rtrs_clt_sess;
+struct rtrs_srv_ctx;
+struct rtrs_srv_sess;
+struct rtrs_srv_op;
+
+/*
+ * RDMA transport (RTRS) client API
+ */
+
+/**
+ * enum rtrs_clt_link_ev - Events about connectivity state of a client
+ * @RTRS_CLT_LINK_EV_RECONNECTED:	Client was reconnected.
+ * @RTRS_CLT_LINK_EV_DISCONNECTED:	Client was disconnected.
+ */
+enum rtrs_clt_link_ev {
+ RTRS_CLT_LINK_EV_RECONNECTED,
+ RTRS_CLT_LINK_EV_DISCONNECTED,
+};
+
+/**
+ * Source and destination address of a path to be established
+ */
+struct rtrs_addr {
+ struct sockaddr_storage *src;
+ struct sockaddr_storage *dst;
+};
+
+/**
+ * rtrs_clt_ops - it holds the link event callback and private pointer.
+ * @priv: User supplied private data.
+ * @link_ev: Event notification callback function for connection state changes
+ *	@priv: User supplied data that was passed to rtrs_clt_open()
+ *	@ev: Occurred event
+ */
+struct rtrs_clt_ops {
+ void *priv;
+ void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
+};
+
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ s16 max_reconnect_attempts, u32 nr_poll_queues);
+
+void rtrs_clt_close(struct rtrs_clt_sess *clt);
+
+enum wait_type {
+ RTRS_PERMIT_NOWAIT = 0,
+ RTRS_PERMIT_WAIT = 1
+};
+
+/**
+ * enum rtrs_clt_con_type - type of IB connection to use with a given
+ * rtrs_permit
+ * @RTRS_ADMIN_CON: use the connection reserved for "service" messages
+ * @RTRS_IO_CON: use a connection reserved for IO
+ */
+enum rtrs_clt_con_type {
+ RTRS_ADMIN_CON,
+ RTRS_IO_CON
+};
+
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_con_type con_type,
+ enum wait_type wait);
+
+void rtrs_clt_put_permit(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit);
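
A permit is the client-side flow-control token tied to the server's queue
depth. A minimal sketch of the get/put discipline (hypothetical helper):

    static int my_with_permit(struct rtrs_clt_sess *sess)
    {
            struct rtrs_permit *permit;

            permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
            if (!permit)
                    return -EBUSY;

            /* ... issue an rtrs_clt_request() that carries this permit ... */

            rtrs_clt_put_permit(sess, permit);
            return 0;
    }
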
+
+/**
+ * rtrs_clt_req_ops - it holds the request confirmation callback
+ * and a private pointer.
+ * @priv: User supplied private data.
+ * @conf_fn: callback function to be called as confirmation
+ *	@priv: User provided data, passed back with the corresponding
+ *	       @(conf) confirmation.
+ *	@errno: error number.
+ */
+struct rtrs_clt_req_ops {
+ void *priv;
+ void (*conf_fn)(void *priv, int errno);
+};
+
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt_sess *sess, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t len,
+ struct scatterlist *sg, unsigned int sg_cnt);
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index);
+
+/**
+ * rtrs_attrs - RTRS session attributes
+ */
+struct rtrs_attrs {
+ u32 queue_depth;
+ u32 max_io_size;
+ u32 max_segments;
+};
+
+int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr);
+
+/*
+ * RTRS server API
+ */
+
+/**
+ * enum rtrs_srv_link_ev - Server link events
+ * @RTRS_SRV_LINK_EV_CONNECTED: Connection from client established
+ * @RTRS_SRV_LINK_EV_DISCONNECTED: Connection was disconnected, all
+ * connection RTRS resources were freed.
+ */
+enum rtrs_srv_link_ev {
+ RTRS_SRV_LINK_EV_CONNECTED,
+ RTRS_SRV_LINK_EV_DISCONNECTED,
+};
+
+struct rtrs_srv_ops {
+ /**
+ * rdma_ev(): Event notification for RDMA operations
+ * If the callback returns a value != 0, an error
+ * message for the data transfer will be sent to
+ * the client.
+ *
+ * @priv: Private data set by rtrs_srv_set_sess_priv()
+ * @id: internal RTRS operation id
+ * @data: Pointer to (bidirectional) rdma memory area:
+ * - in case of %RTRS_SRV_RDMA_EV_RECV contains
+ * data sent by the client
+ * - in case of %RTRS_SRV_RDMA_EV_WRITE_REQ points
+ * to the memory area where the response is to be
+ * written to
+ * @datalen: Size of the memory area in @data
+ * @usr: The extra user message sent by the client (%vec)
+ * @usrlen: Size of the user message
+ */
+ int (*rdma_ev)(void *priv,
+ struct rtrs_srv_op *id,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen);
+ /**
+ * link_ev(): Events about connectivity state changes
+ * If the callback returns != 0 and the event
+ * %RTRS_SRV_LINK_EV_CONNECTED the corresponding
+ * session will be destroyed.
+ * @sess: Session
+ * @ev: event
+ * @priv: Private data from user if previously set with
+ * rtrs_srv_set_sess_priv()
+ */
+ int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev,
+ void *priv);
+};
+
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port);
+
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
+
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
+
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv);
+
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname,
+ size_t len);
+
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess);
+
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr);
+
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len);
+#endif
diff --git a/drivers/infiniband/ulp/srp/Kbuild b/drivers/infiniband/ulp/srp/Kbuild
new file mode 100644
index 000000000..d1f4e513b
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_SRP) += ib_srp.o
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
new file mode 100644
index 000000000..67cd63d13
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_SRP
+ tristate "InfiniBand SCSI RDMA Protocol"
+ depends on SCSI && INFINIBAND_ADDR_TRANS
+ select SCSI_SRP_ATTRS
+ help
+ Support for the SCSI RDMA Protocol over InfiniBand. This
+ allows you to access storage devices that speak SRP over
+ InfiniBand.
+
+ The SRP protocol is defined by the INCITS T10 technical
+ committee. See <http://www.t10.org/>.
+
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
new file mode 100644
index 000000000..c4dcef76e
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -0,0 +1,4225 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/lockdep.h>
+#include <linux/inet.h>
+#include <rdma/ib_cache.h>
+
+#include <linux/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/srp.h>
+#include <scsi/scsi_transport_srp.h>
+
+#include "ib_srp.h"
+
+#define DRV_NAME "ib_srp"
+#define PFX DRV_NAME ": "
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
+MODULE_LICENSE("Dual BSD/GPL");
+
+#if !defined(CONFIG_DYNAMIC_DEBUG)
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
+#endif
+
+static unsigned int srp_sg_tablesize;
+static unsigned int cmd_sg_entries;
+static unsigned int indirect_sg_entries;
+static bool allow_ext_sg;
+static bool register_always = true;
+static bool never_register;
+static int topspin_workarounds = 1;
+
+module_param(srp_sg_tablesize, uint, 0444);
+MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
+
+module_param(cmd_sg_entries, uint, 0444);
+MODULE_PARM_DESC(cmd_sg_entries,
+ "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
+
+module_param(indirect_sg_entries, uint, 0444);
+MODULE_PARM_DESC(indirect_sg_entries,
+ "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
+
+module_param(allow_ext_sg, bool, 0444);
+MODULE_PARM_DESC(allow_ext_sg,
+ "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
+
+module_param(topspin_workarounds, int, 0444);
+MODULE_PARM_DESC(topspin_workarounds,
+ "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
+
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+ "Use memory registration even for contiguous memory regions");
+
+module_param(never_register, bool, 0444);
+MODULE_PARM_DESC(never_register, "Never register memory");
+
+static const struct kernel_param_ops srp_tmo_ops;
+
+static int srp_reconnect_delay = 10;
+module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
+
+static int srp_fast_io_fail_tmo = 15;
+module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_io_fail_tmo,
+ "Number of seconds between the observation of a transport"
+ " layer error and failing all I/O. \"off\" means that this"
+ " functionality is disabled.");
+
+static int srp_dev_loss_tmo = 600;
+module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo,
+ "Maximum number of seconds that the SRP transport should"
+ " insulate transport layer errors. After this time has been"
+ " exceeded the SCSI host is removed. Should be"
+ " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ " if fast_io_fail_tmo has not been set. \"off\" means that"
+ " this functionality is disabled.");
+
+static bool srp_use_imm_data = true;
+module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
+MODULE_PARM_DESC(use_imm_data,
+ "Whether or not to request permission to use immediate data during SRP login.");
+
+static unsigned int srp_max_imm_data = 8 * 1024;
+module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
+MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
+
+static unsigned ch_count;
+module_param(ch_count, uint, 0444);
+MODULE_PARM_DESC(ch_count,
+ "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
+
+static int srp_add_one(struct ib_device *device);
+static void srp_remove_one(struct ib_device *device, void *client_data);
+static void srp_rename_dev(struct ib_device *device, void *client_data);
+static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+ const char *opname);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event);
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
+
+static struct scsi_transport_template *ib_srp_transport_template;
+static struct workqueue_struct *srp_remove_wq;
+
+static struct ib_client srp_client = {
+ .name = "srp",
+ .add = srp_add_one,
+ .remove = srp_remove_one,
+ .rename = srp_rename_dev
+};
+
+static struct ib_sa_client srp_sa_client;
+
+static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
+{
+ int tmo = *(int *)kp->arg;
+
+ if (tmo >= 0)
+ return sysfs_emit(buffer, "%d\n", tmo);
+ else
+ return sysfs_emit(buffer, "off\n");
+}
+
+static int srp_tmo_set(const char *val, const struct kernel_param *kp)
+{
+ int tmo, res;
+
+ res = srp_parse_tmo(&tmo, val);
+ if (res)
+ goto out;
+
+ if (kp->arg == &srp_reconnect_delay)
+ res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
+ srp_dev_loss_tmo);
+ else if (kp->arg == &srp_fast_io_fail_tmo)
+ res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
+ else
+ res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
+ tmo);
+ if (res)
+ goto out;
+ *(int *)kp->arg = tmo;
+
+out:
+ return res;
+}
+
+static const struct kernel_param_ops srp_tmo_ops = {
+ .get = srp_tmo_get,
+ .set = srp_tmo_set,
+};
+
+static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
+{
+ return (struct srp_target_port *) host->hostdata;
+}
+
+static const char *srp_target_info(struct Scsi_Host *host)
+{
+ return host_to_target(host)->target_name;
+}
+
+static int srp_target_is_topspin(struct srp_target_port *target)
+{
+ static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
+ static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
+
+ return topspin_workarounds &&
+ (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
+ !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
+}
+
+static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
+ gfp_t gfp_mask,
+ enum dma_data_direction direction)
+{
+ struct srp_iu *iu;
+
+ iu = kmalloc(sizeof *iu, gfp_mask);
+ if (!iu)
+ goto out;
+
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto out_free_iu;
+
+ iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
+ direction);
+ if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
+ goto out_free_buf;
+
+ iu->size = size;
+ iu->direction = direction;
+
+ return iu;
+
+out_free_buf:
+ kfree(iu->buf);
+out_free_iu:
+ kfree(iu);
+out:
+ return NULL;
+}
+
+static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
+{
+ if (!iu)
+ return;
+
+ ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
+ iu->direction);
+ kfree(iu->buf);
+ kfree(iu);
+}
+
+static void srp_qp_event(struct ib_event *event, void *context)
+{
+ pr_debug("QP event %s (%d)\n",
+ ib_event_msg(event->event), event->event);
+}
+
+static int srp_init_ib_qp(struct srp_target_port *target,
+ struct ib_qp *qp)
+{
+ struct ib_qp_attr *attr;
+ int ret;
+
+ attr = kmalloc(sizeof *attr, GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
+ target->srp_host->port,
+ be16_to_cpu(target->ib_cm.pkey),
+ &attr->pkey_index);
+ if (ret)
+ goto out;
+
+ attr->qp_state = IB_QPS_INIT;
+ attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ attr->port_num = target->srp_host->port;
+
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE |
+ IB_QP_PKEY_INDEX |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT);
+
+out:
+ kfree(attr);
+ return ret;
+}
+
+static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct ib_cm_id *new_cm_id;
+
+ new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
+ srp_ib_cm_handler, ch);
+ if (IS_ERR(new_cm_id))
+ return PTR_ERR(new_cm_id);
+
+ if (ch->ib_cm.cm_id)
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = new_cm_id;
+ if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
+ target->srp_host->port))
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
+ ch->ib_cm.path.sgid = target->sgid;
+ ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
+ ch->ib_cm.path.pkey = target->ib_cm.pkey;
+ ch->ib_cm.path.service_id = target->ib_cm.service_id;
+
+ return 0;
+}
+
+static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct rdma_cm_id *new_cm_id;
+ int ret;
+
+ new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ new_cm_id = NULL;
+ goto out;
+ }
+
+ init_completion(&ch->done);
+ ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
+ &target->rdma_cm.src.sa : NULL,
+ &target->rdma_cm.dst.sa,
+ SRP_PATH_REC_TIMEOUT_MS);
+ if (ret) {
+ pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
+ &target->rdma_cm.src, &target->rdma_cm.dst, ret);
+ goto out;
+ }
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ goto out;
+
+ ret = ch->status;
+ if (ret) {
+ pr_err("Resolving address %pISpsc failed (%d)\n",
+ &target->rdma_cm.dst, ret);
+ goto out;
+ }
+
+ swap(ch->rdma_cm.cm_id, new_cm_id);
+
+out:
+ if (new_cm_id)
+ rdma_destroy_id(new_cm_id);
+
+ return ret;
+}
+
+static int srp_new_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
+ srp_new_ib_cm_id(ch);
+}
+
+/**
+ * srp_destroy_fr_pool() - free the resources owned by a pool
+ * @pool: Fast registration pool to be destroyed.
+ */
+static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
+{
+ int i;
+ struct srp_fr_desc *d;
+
+ if (!pool)
+ return;
+
+ for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
+ if (d->mr)
+ ib_dereg_mr(d->mr);
+ }
+ kfree(pool);
+}
+
+/**
+ * srp_create_fr_pool() - allocate and initialize a pool for fast registration
+ * @device: IB device to allocate fast registration descriptors for.
+ * @pd: Protection domain associated with the FR descriptors.
+ * @pool_size: Number of descriptors to allocate.
+ * @max_page_list_len: Maximum fast registration work request page list length.
+ */
+static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
+ struct ib_pd *pd, int pool_size,
+ int max_page_list_len)
+{
+ struct srp_fr_pool *pool;
+ struct srp_fr_desc *d;
+ struct ib_mr *mr;
+ int i, ret = -EINVAL;
+ enum ib_mr_type mr_type;
+
+ if (pool_size <= 0)
+ goto err;
+ ret = -ENOMEM;
+ pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->size = pool_size;
+ pool->max_page_list_len = max_page_list_len;
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free_list);
+
+ if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
+ for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
+ mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
+ if (IS_ERR(mr)) {
+ ret = PTR_ERR(mr);
+ if (ret == -ENOMEM)
+ pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
+ dev_name(&device->dev));
+ goto destroy_pool;
+ }
+ d->mr = mr;
+ list_add_tail(&d->entry, &pool->free_list);
+ }
+
+out:
+ return pool;
+
+destroy_pool:
+ srp_destroy_fr_pool(pool);
+
+err:
+ pool = ERR_PTR(ret);
+ goto out;
+}
+
+/**
+ * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
+ * @pool: Pool to obtain descriptor from.
+ */
+static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
+{
+ struct srp_fr_desc *d = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ if (!list_empty(&pool->free_list)) {
+ d = list_first_entry(&pool->free_list, typeof(*d), entry);
+ list_del(&d->entry);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return d;
+}
+
+/**
+ * srp_fr_pool_put() - put an FR descriptor back in the free list
+ * @pool: Pool the descriptor was allocated from.
+ * @desc: Pointer to an array of fast registration descriptor pointers.
+ * @n: Number of descriptors to put back.
+ *
+ * Note: The caller must already have queued an invalidation request for
+ * desc->mr->rkey before calling this function.
+ */
+static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
+ int n)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ for (i = 0; i < n; i++)
+ list_add(&desc[i]->entry, &pool->free_list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
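
A sketch of the get/put discipline around this pool; the real users later
in this file also post the registration and, per the note above, the
invalidation work requests in between (illustrative helper):

    static int srp_fr_example(struct srp_fr_pool *pool)
    {
            struct srp_fr_desc *d;

            d = srp_fr_pool_get(pool);
            if (!d)
                    return -ENOMEM; /* pool exhausted; back off */

            /*
             * ... register memory through d->mr, run the IO, then queue
             * an invalidate for d->mr->rkey before putting it back ...
             */

            srp_fr_pool_put(pool, &d, 1);
            return 0;
    }
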
+
+static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
+{
+ struct srp_device *dev = target->srp_host->srp_dev;
+
+ return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
+ dev->max_pages_per_mr);
+}
+
+/**
+ * srp_destroy_qp() - destroy an RDMA queue pair
+ * @ch: SRP RDMA channel.
+ *
+ * Drain the qp before destroying it. This prevents the receive
+ * completion handler from accessing the queue pair while it is
+ * being destroyed.
+ */
+static void srp_destroy_qp(struct srp_rdma_ch *ch)
+{
+ spin_lock_irq(&ch->lock);
+ ib_process_cq_direct(ch->send_cq, -1);
+ spin_unlock_irq(&ch->lock);
+
+ ib_drain_qp(ch->qp);
+ ib_destroy_qp(ch->qp);
+}
+
+static int srp_create_ch_ib(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ const struct ib_device_attr *attr = &dev->dev->attrs;
+ struct ib_qp_init_attr *init_attr;
+ struct ib_cq *recv_cq, *send_cq;
+ struct ib_qp *qp;
+ struct srp_fr_pool *fr_pool = NULL;
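+ /*
+ * Number of send work requests posted per SCSI command: one for the
+ * SRP_CMD IU plus, with fast registration, one IB_WR_REG_MR and one
+ * IB_WR_LOCAL_INV work request per memory region (mr_per_cmd).
+ */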
+ const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
+ int ret;
+
+ init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
+ if (!init_attr)
+ return -ENOMEM;
+
+ /* queue_size + 1 for ib_drain_rq() */
+ recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
+ ch->comp_vector, IB_POLL_SOFTIRQ);
+ if (IS_ERR(recv_cq)) {
+ ret = PTR_ERR(recv_cq);
+ goto err;
+ }
+
+ send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
+ ch->comp_vector, IB_POLL_DIRECT);
+ if (IS_ERR(send_cq)) {
+ ret = PTR_ERR(send_cq);
+ goto err_recv_cq;
+ }
+
+ init_attr->event_handler = srp_qp_event;
+ init_attr->cap.max_send_wr = m * target->queue_size;
+ init_attr->cap.max_recv_wr = target->queue_size + 1;
+ init_attr->cap.max_recv_sge = 1;
+ init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr->qp_type = IB_QPT_RC;
+ init_attr->send_cq = send_cq;
+ init_attr->recv_cq = recv_cq;
+
+ ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
+
+ if (target->using_rdma_cm) {
+ ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
+ qp = ch->rdma_cm.cm_id->qp;
+ } else {
+ qp = ib_create_qp(dev->pd, init_attr);
+ if (!IS_ERR(qp)) {
+ ret = srp_init_ib_qp(target, qp);
+ if (ret)
+ ib_destroy_qp(qp);
+ } else {
+ ret = PTR_ERR(qp);
+ }
+ }
+ if (ret) {
+ pr_err("QP creation failed for dev %s: %d\n",
+ dev_name(&dev->dev->dev), ret);
+ goto err_send_cq;
+ }
+
+ if (dev->use_fast_reg) {
+ fr_pool = srp_alloc_fr_pool(target);
+ if (IS_ERR(fr_pool)) {
+ ret = PTR_ERR(fr_pool);
+ shost_printk(KERN_WARNING, target->scsi_host, PFX
+ "FR pool allocation failed (%d)\n", ret);
+ goto err_qp;
+ }
+ }
+
+ if (ch->qp)
+ srp_destroy_qp(ch);
+ if (ch->recv_cq)
+ ib_free_cq(ch->recv_cq);
+ if (ch->send_cq)
+ ib_free_cq(ch->send_cq);
+
+ ch->qp = qp;
+ ch->recv_cq = recv_cq;
+ ch->send_cq = send_cq;
+
+ if (dev->use_fast_reg) {
+ if (ch->fr_pool)
+ srp_destroy_fr_pool(ch->fr_pool);
+ ch->fr_pool = fr_pool;
+ }
+
+ kfree(init_attr);
+ return 0;
+
+err_qp:
+ if (target->using_rdma_cm)
+ rdma_destroy_qp(ch->rdma_cm.cm_id);
+ else
+ ib_destroy_qp(qp);
+
+err_send_cq:
+ ib_free_cq(send_cq);
+
+err_recv_cq:
+ ib_free_cq(recv_cq);
+
+err:
+ kfree(init_attr);
+ return ret;
+}
+
+/*
+ * Note: this function may be called without srp_alloc_iu_bufs() having been
+ * invoked. Hence the ch->[rt]x_ring checks.
+ */
+static void srp_free_ch_ib(struct srp_target_port *target,
+ struct srp_rdma_ch *ch)
+{
+ struct srp_device *dev = target->srp_host->srp_dev;
+ int i;
+
+ if (!ch->target)
+ return;
+
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id) {
+ rdma_destroy_id(ch->rdma_cm.cm_id);
+ ch->rdma_cm.cm_id = NULL;
+ }
+ } else {
+ if (ch->ib_cm.cm_id) {
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = NULL;
+ }
+ }
+
+ /* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
+ if (!ch->qp)
+ return;
+
+ if (dev->use_fast_reg) {
+ if (ch->fr_pool)
+ srp_destroy_fr_pool(ch->fr_pool);
+ }
+
+ srp_destroy_qp(ch);
+ ib_free_cq(ch->send_cq);
+ ib_free_cq(ch->recv_cq);
+
+ /*
+ * Prevent the SCSI error handler from using this channel after it has
+ * been freed: the error handler may continue attempting recovery
+ * actions after scsi_remove_host() has returned.
+ */
+ ch->target = NULL;
+
+ ch->qp = NULL;
+ ch->send_cq = ch->recv_cq = NULL;
+
+ if (ch->rx_ring) {
+ for (i = 0; i < target->queue_size; ++i)
+ srp_free_iu(target->srp_host, ch->rx_ring[i]);
+ kfree(ch->rx_ring);
+ ch->rx_ring = NULL;
+ }
+ if (ch->tx_ring) {
+ for (i = 0; i < target->queue_size; ++i)
+ srp_free_iu(target->srp_host, ch->tx_ring[i]);
+ kfree(ch->tx_ring);
+ ch->tx_ring = NULL;
+ }
+}
+
+static void srp_path_rec_completion(int status,
+ struct sa_path_rec *pathrec,
+ int num_paths, void *ch_ptr)
+{
+ struct srp_rdma_ch *ch = ch_ptr;
+ struct srp_target_port *target = ch->target;
+
+ ch->status = status;
+ if (status)
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Got failed path rec status %d\n", status);
+ else
+ ch->ib_cm.path = *pathrec;
+ complete(&ch->done);
+}
+
+static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ ch->ib_cm.path.numb_path = 1;
+
+ init_completion(&ch->done);
+
+ ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ target->srp_host->srp_dev->dev,
+ target->srp_host->port,
+ &ch->ib_cm.path,
+ IB_SA_PATH_REC_SERVICE_ID |
+ IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_NUMB_PATH |
+ IB_SA_PATH_REC_PKEY,
+ SRP_PATH_REC_TIMEOUT_MS,
+ GFP_KERNEL,
+ srp_path_rec_completion,
+ ch, &ch->ib_cm.path_query);
+ if (ch->ib_cm.path_query_id < 0)
+ return ch->ib_cm.path_query_id;
+
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ return ret;
+
+ if (ch->status < 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
+ ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id));
+
+ return ch->status;
+}
+
+static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ init_completion(&ch->done);
+
+ ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+ wait_for_completion_interruptible(&ch->done);
+
+ if (ch->status != 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path resolution failed\n");
+
+ return ch->status;
+}
+
+static int srp_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
+ srp_ib_lookup_path(ch);
+}
+
+static u8 srp_get_subnet_timeout(struct srp_host *host)
+{
+ struct ib_port_attr attr;
+ int ret;
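+ /*
+ * The subnet timeout is an exponent: the actual timeout is
+ * 4.096 us * 2^subnet_timeout. 18 is a conservative fallback for
+ * when querying the port attributes fails.
+ */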
+ u8 subnet_timeout = 18;
+
+ ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
+ if (ret == 0)
+ subnet_timeout = attr.subnet_timeout;
+
+ if (unlikely(subnet_timeout < 15))
+ pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
+ dev_name(&host->srp_dev->dev->dev), subnet_timeout);
+
+ return subnet_timeout;
+}
+
+static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+ bool multich)
+{
+ struct srp_target_port *target = ch->target;
+ struct {
+ struct rdma_conn_param rdma_param;
+ struct srp_login_req_rdma rdma_req;
+ struct ib_cm_req_param ib_param;
+ struct srp_login_req ib_req;
+ } *req = NULL;
+ char *ipi, *tpi;
+ int status;
+
+ req = kzalloc(sizeof *req, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->ib_param.flow_control = 1;
+ req->ib_param.retry_count = target->tl_retry_count;
+
+ /*
+ * Pick some arbitrary defaults here; we could make these
+ * module parameters if anyone cared about setting them.
+ */
+ req->ib_param.responder_resources = 4;
+ req->ib_param.rnr_retry_count = 7;
+ req->ib_param.max_cm_retries = 15;
+
+ req->ib_req.opcode = SRP_LOGIN_REQ;
+ req->ib_req.tag = 0;
+ req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
+ req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
+ req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
+ SRP_MULTICHAN_SINGLE);
+ if (srp_use_imm_data) {
+ req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
+ req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
+ }
+
+ if (target->using_rdma_cm) {
+ req->rdma_param.flow_control = req->ib_param.flow_control;
+ req->rdma_param.responder_resources =
+ req->ib_param.responder_resources;
+ req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
+ req->rdma_param.retry_count = req->ib_param.retry_count;
+ req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
+ req->rdma_param.private_data = &req->rdma_req;
+ req->rdma_param.private_data_len = sizeof(req->rdma_req);
+
+ req->rdma_req.opcode = req->ib_req.opcode;
+ req->rdma_req.tag = req->ib_req.tag;
+ req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
+ req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
+ req->rdma_req.req_flags = req->ib_req.req_flags;
+ req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
+
+ ipi = req->rdma_req.initiator_port_id;
+ tpi = req->rdma_req.target_port_id;
+ } else {
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
+
+ req->ib_param.primary_path = &ch->ib_cm.path;
+ req->ib_param.alternate_path = NULL;
+ req->ib_param.service_id = target->ib_cm.service_id;
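+ /* The starting PSN is a 24-bit field, hence the mask below. */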
+ get_random_bytes(&req->ib_param.starting_psn, 4);
+ req->ib_param.starting_psn &= 0xffffff;
+ req->ib_param.qp_num = ch->qp->qp_num;
+ req->ib_param.qp_type = ch->qp->qp_type;
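+ /*
+ * Presumably +2 so that the CM waits about four times the subnet
+ * timeout (both values use the 4.096 us * 2^n encoding).
+ */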
+ req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.private_data = &req->ib_req;
+ req->ib_param.private_data_len = sizeof(req->ib_req);
+
+ ipi = req->ib_req.initiator_port_id;
+ tpi = req->ib_req.target_port_id;
+ }
+
+ /*
+ * In the published SRP specification (draft rev. 16a), the
+ * port identifier format is 8 bytes of ID extension followed
+ * by 8 bytes of GUID. Older drafts put the two halves in the
+ * opposite order, so that the GUID comes first.
+ *
+ * Targets conforming to these obsolete drafts can be
+ * recognized by the I/O Class they report.
+ */
+ if (target->io_class == SRP_REV10_IB_IO_CLASS) {
+ memcpy(ipi, &target->sgid.global.interface_id, 8);
+ memcpy(ipi + 8, &target->initiator_ext, 8);
+ memcpy(tpi, &target->ioc_guid, 8);
+ memcpy(tpi + 8, &target->id_ext, 8);
+ } else {
+ memcpy(ipi, &target->initiator_ext, 8);
+ memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
+ memcpy(tpi, &target->id_ext, 8);
+ memcpy(tpi + 8, &target->ioc_guid, 8);
+ }
+
+ /*
+ * Topspin/Cisco SRP targets will reject our login unless we
+ * zero out the first 8 bytes of our initiator port ID and set
+ * the second 8 bytes to the local node GUID.
+ */
+ if (srp_target_is_topspin(target)) {
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Topspin/Cisco initiator port ID workaround "
+ "activated for target GUID %016llx\n",
+ be64_to_cpu(target->ioc_guid));
+ memset(ipi, 0, 8);
+ memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
+ }
+
+ if (target->using_rdma_cm)
+ status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
+ else
+ status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
+
+ kfree(req);
+
+ return status;
+}
+
+static bool srp_queue_remove_work(struct srp_target_port *target)
+{
+ bool changed = false;
+
+ spin_lock_irq(&target->lock);
+ if (target->state != SRP_TARGET_REMOVED) {
+ target->state = SRP_TARGET_REMOVED;
+ changed = true;
+ }
+ spin_unlock_irq(&target->lock);
+
+ if (changed)
+ queue_work(srp_remove_wq, &target->remove_work);
+
+ return changed;
+}
+
+static void srp_disconnect_target(struct srp_target_port *target)
+{
+ struct srp_rdma_ch *ch;
+ int i, ret;
+
+ /* XXX should send SRP_I_LOGOUT request */
+
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ ch->connected = false;
+ ret = 0;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id)
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ } else {
+ if (ch->ib_cm.cm_id)
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
+ NULL, 0);
+ }
+ if (ret < 0) {
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM DREQ failed\n");
+ }
+ }
+}
+
+static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct srp_target_port *target = host_to_target(shost);
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = dev->dev;
+ struct srp_request *req = scsi_cmd_priv(cmd);
+
+ kfree(req->fr_list);
+ if (req->indirect_dma_addr) {
+ ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
+ target->indirect_size,
+ DMA_TO_DEVICE);
+ }
+ kfree(req->indirect_desc);
+
+ return 0;
+}
+
+static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct srp_target_port *target = host_to_target(shost);
+ struct srp_device *srp_dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+ struct srp_request *req = scsi_cmd_priv(cmd);
+ dma_addr_t dma_addr;
+ int ret = -ENOMEM;
+
+ if (srp_dev->use_fast_reg) {
+ req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
+ GFP_KERNEL);
+ if (!req->fr_list)
+ goto out;
+ }
+ req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+ if (!req->indirect_desc)
+ goto out;
+
+ dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+ target->indirect_size,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ibdev, dma_addr)) {
+ srp_exit_cmd_priv(shost, cmd);
+ goto out;
+ }
+
+ req->indirect_dma_addr = dma_addr;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
+ * @shost: SCSI host whose attributes to remove from sysfs.
+ *
+ * Note: Any attributes defined in the host template that did not exist
+ * before this function was invoked are ignored.
+ */
+static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
+{
+ const struct attribute_group **g;
+ struct attribute **attr;
+
+ for (g = shost->hostt->shost_groups; *g; ++g) {
+ for (attr = (*g)->attrs; *attr; ++attr) {
+ struct device_attribute *dev_attr =
+ container_of(*attr, typeof(*dev_attr), attr);
+
+ device_remove_file(&shost->shost_dev, dev_attr);
+ }
+ }
+}
+
+static void srp_remove_target(struct srp_target_port *target)
+{
+ struct srp_rdma_ch *ch;
+ int i;
+
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+ srp_del_scsi_host_attr(target->scsi_host);
+ srp_rport_get(target->rport);
+ srp_remove_host(target->scsi_host);
+ scsi_remove_host(target->scsi_host);
+ srp_stop_rport_timers(target->rport);
+ srp_disconnect_target(target);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ srp_free_ch_ib(target, ch);
+ }
+ cancel_work_sync(&target->tl_err_work);
+ srp_rport_put(target->rport);
+ kfree(target->ch);
+ target->ch = NULL;
+
+ spin_lock(&target->srp_host->target_lock);
+ list_del(&target->list);
+ spin_unlock(&target->srp_host->target_lock);
+
+ scsi_host_put(target->scsi_host);
+}
+
+static void srp_remove_work(struct work_struct *work)
+{
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, remove_work);
+
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+ srp_remove_target(target);
+}
+
+static void srp_rport_delete(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+
+ srp_queue_remove_work(target);
+}
+
+/**
+ * srp_connected_ch() - number of connected channels
+ * @target: SRP target port.
+ */
+static int srp_connected_ch(struct srp_target_port *target)
+{
+ int i, c = 0;
+
+ for (i = 0; i < target->ch_count; i++)
+ c += target->ch[i].connected;
+
+ return c;
+}
+
+static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+ bool multich)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
+
+ ret = srp_lookup_path(ch);
+ if (ret)
+ goto out;
+
+ while (1) {
+ init_completion(&ch->done);
+ ret = srp_send_req(ch, max_iu_len, multich);
+ if (ret)
+ goto out;
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * The CM event handling code will set status to
+ * SRP_PORT_REDIRECT if we get a port redirect REJ
+ * back, or SRP_DLID_REDIRECT if we get a lid/qp
+ * redirect REJ back.
+ */
+ ret = ch->status;
+ switch (ret) {
+ case 0:
+ ch->connected = true;
+ goto out;
+
+ case SRP_PORT_REDIRECT:
+ ret = srp_lookup_path(ch);
+ if (ret)
+ goto out;
+ break;
+
+ case SRP_DLID_REDIRECT:
+ break;
+
+ case SRP_STALE_CONN:
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "giving up on stale connection\n");
+ ret = -ECONNRESET;
+ goto out;
+
+ default:
+ goto out;
+ }
+ }
+
+out:
+ return ret <= 0 ? ret : -ENODEV;
+}
+
+static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ srp_handle_qp_err(cq, wc, "INV RKEY");
+}
+
+static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
+ u32 rkey)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .next = NULL,
+ .num_sge = 0,
+ .send_flags = 0,
+ .ex.invalidate_rkey = rkey,
+ };
+
+ wr.wr_cqe = &req->reg_cqe;
+ req->reg_cqe.done = srp_inv_rkey_err_done;
+ return ib_post_send(ch->qp, &wr, NULL);
+}
+
+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ struct srp_rdma_ch *ch,
+ struct srp_request *req)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = dev->dev;
+ int i, res;
+
+ if (!scsi_sglist(scmnd) ||
+ (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+ scmnd->sc_data_direction != DMA_FROM_DEVICE))
+ return;
+
+ if (dev->use_fast_reg) {
+ struct srp_fr_desc **pfr;
+
+ for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
+ res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
+ if (res < 0) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ (*pfr)->mr->rkey, res);
+ queue_work(system_long_wq,
+ &target->tl_err_work);
+ }
+ }
+ if (req->nmdesc)
+ srp_fr_pool_put(ch->fr_pool, req->fr_list,
+ req->nmdesc);
+ }
+
+ ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
+ scmnd->sc_data_direction);
+}
+
+/**
+ * srp_claim_req() - Take ownership of the scmnd associated with a request.
+ * @ch: SRP RDMA channel.
+ * @req: SRP request.
+ * @sdev: If not NULL, only take ownership for this SCSI device.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ * ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
+ struct srp_request *req,
+ struct scsi_device *sdev,
+ struct scsi_cmnd *scmnd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ if (req->scmnd &&
+ (!sdev || req->scmnd->device == sdev) &&
+ (!scmnd || req->scmnd == scmnd)) {
+ scmnd = req->scmnd;
+ req->scmnd = NULL;
+ } else {
+ scmnd = NULL;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and adjust ch->req_lim.
+ * @ch: SRP RDMA channel.
+ * @req: Request to be freed.
+ * @scmnd: SCSI command associated with @req.
+ * @req_lim_delta: Amount to be added to @target->req_lim.
+ */
+static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
+ struct scsi_cmnd *scmnd, s32 req_lim_delta)
+{
+ unsigned long flags;
+
+ srp_unmap_data(scmnd, ch, req);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->req_lim += req_lim_delta;
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
+ struct scsi_device *sdev, int result)
+{
+ struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
+
+ if (scmnd) {
+ srp_free_req(ch, req, scmnd, 0);
+ scmnd->result = result;
+ scsi_done(scmnd);
+ }
+}
+
+struct srp_terminate_context {
+ struct srp_target_port *srp_target;
+ int scsi_result;
+};
+
+static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
+{
+ struct srp_terminate_context *context = context_ptr;
+ struct srp_target_port *target = context->srp_target;
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
+ struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
+ struct srp_request *req = scsi_cmd_priv(scmnd);
+
+ srp_finish_req(ch, req, NULL, context->scsi_result);
+
+ return true;
+}
+
+static void srp_terminate_io(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+ struct srp_terminate_context context = { .srp_target = target,
+ .scsi_result = DID_TRANSPORT_FAILFAST << 16 };
+
+ scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
+}
+
+/* Calculate maximum initiator to target information unit length. */
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+ uint32_t max_it_iu_size)
+{
+ uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
+ sizeof(struct srp_indirect_buf) +
+ cmd_sg_cnt * sizeof(struct srp_direct_buf);
+
+ if (use_imm_data)
+ max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
+ srp_max_imm_data);
+
+ if (max_it_iu_size)
+ max_iu_len = min(max_iu_len, max_it_iu_size);
+
+ pr_debug("max_iu_len = %d\n", max_iu_len);
+
+ return max_iu_len;
+}
+
+/*
+ * It is up to the caller to ensure that srp_rport_reconnect() calls are
+ * serialized and that no concurrent srp_queuecommand(), srp_abort(),
+ * srp_reset_device() or srp_reset_host() calls will occur while this function
+ * is in progress. One way to achieve this is to call srp_reconnect_rport()
+ * instead of calling this function directly: srp_reconnect_rport() serializes
+ * calls to this function via rport->mutex and also blocks srp_queuecommand()
+ * before invoking it.
+ */
+static int srp_rport_reconnect(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+ struct srp_rdma_ch *ch;
+ uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ srp_use_imm_data,
+ target->max_it_iu_size);
+ int i, j, ret = 0;
+ bool multich = false;
+
+ srp_disconnect_target(target);
+
+ if (target->state == SRP_TARGET_SCANNING)
+ return -ENODEV;
+
+ /*
+ * Now get a new local CM ID so that we avoid confusing the target in
+ * case things are really fouled up. Doing so also ensures that all CM
+ * callbacks will have finished before a new QP is allocated.
+ */
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ ret += srp_new_cm_id(ch);
+ }
+ {
+ struct srp_terminate_context context = {
+ .srp_target = target, .scsi_result = DID_RESET << 16};
+
+ scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
+ &context);
+ }
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ /*
+ * Whether or not creating a new CM ID succeeded, create a new
+ * QP. This guarantees that all completion callback function
+ * invocations have finished before request resetting starts.
+ */
+ ret += srp_create_ch_ib(ch);
+
+ INIT_LIST_HEAD(&ch->free_tx);
+ for (j = 0; j < target->queue_size; ++j)
+ list_add(&ch->tx_ring[j]->list, &ch->free_tx);
+ }
+
+ target->qp_in_error = false;
+
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ if (ret)
+ break;
+ ret = srp_connect_ch(ch, max_iu_len, multich);
+ multich = true;
+ }
+
+ if (ret == 0)
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "reconnect succeeded\n");
+
+ return ret;
+}
+
+static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
+ unsigned int dma_len, u32 rkey)
+{
+ struct srp_direct_buf *desc = state->desc;
+
+ WARN_ON_ONCE(!dma_len);
+
+ desc->va = cpu_to_be64(dma_addr);
+ desc->key = cpu_to_be32(rkey);
+ desc->len = cpu_to_be32(dma_len);
+
+ state->total_len += dma_len;
+ state->desc++;
+ state->ndesc++;
+}
+
+static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ srp_handle_qp_err(cq, wc, "FAST REG");
+}
+
+/*
+ * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
+ * where to start in the first element. If sg_offset_p != NULL then
+ * *sg_offset_p is updated to the offset in state->sg[retval] of the first
+ * byte that has not yet been mapped.
+ */
+static int srp_map_finish_fr(struct srp_map_state *state,
+ struct srp_request *req,
+ struct srp_rdma_ch *ch, int sg_nents,
+ unsigned int *sg_offset_p)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_reg_wr wr;
+ struct srp_fr_desc *desc;
+ u32 rkey;
+ int n, err;
+
+ if (state->fr.next >= state->fr.end) {
+ shost_printk(KERN_ERR, ch->target->scsi_host,
+ PFX "Out of MRs (mr_per_cmd = %d)\n",
+ ch->target->mr_per_cmd);
+ return -ENOMEM;
+ }
+
+ WARN_ON_ONCE(!dev->use_fast_reg);
+
+ if (sg_nents == 1 && target->global_rkey) {
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+
+ srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
+ sg_dma_len(state->sg) - sg_offset,
+ target->global_rkey);
+ if (sg_offset_p)
+ *sg_offset_p = 0;
+ return 1;
+ }
+
+ desc = srp_fr_pool_get(ch->fr_pool);
+ if (!desc)
+ return -ENOMEM;
+
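+ /*
+ * Increment the key portion of the rkey before reusing this MR so
+ * that stale remote references to a previous registration fail
+ * instead of hitting the new mapping.
+ */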
+ rkey = ib_inc_rkey(desc->mr->rkey);
+ ib_update_fast_reg_key(desc->mr, rkey);
+
+ n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
+ dev->mr_page_size);
+ if (unlikely(n < 0)) {
+ srp_fr_pool_put(ch->fr_pool, &desc, 1);
+ pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
+ dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
+ sg_offset_p ? *sg_offset_p : -1, n);
+ return n;
+ }
+
+ WARN_ON_ONCE(desc->mr->length == 0);
+
+ req->reg_cqe.done = srp_reg_mr_err_done;
+
+ wr.wr.next = NULL;
+ wr.wr.opcode = IB_WR_REG_MR;
+ wr.wr.wr_cqe = &req->reg_cqe;
+ wr.wr.num_sge = 0;
+ wr.wr.send_flags = 0;
+ wr.mr = desc->mr;
+ wr.key = desc->mr->rkey;
+ wr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+
+ *state->fr.next++ = desc;
+ state->nmdesc++;
+
+ srp_map_desc(state, desc->mr->iova,
+ desc->mr->length, desc->mr->rkey);
+
+ err = ib_post_send(ch->qp, &wr.wr, NULL);
+ if (unlikely(err)) {
+ WARN_ON_ONCE(err == -ENOMEM);
+ return err;
+ }
+
+ return n;
+}
+
+static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+ struct srp_request *req, struct scatterlist *scat,
+ int count)
+{
+ unsigned int sg_offset = 0;
+
+ state->fr.next = req->fr_list;
+ state->fr.end = req->fr_list + ch->target->mr_per_cmd;
+ state->sg = scat;
+
+ if (count == 0)
+ return 0;
+
+ while (count) {
+ int i, n;
+
+ n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
+ if (unlikely(n < 0))
+ return n;
+
+ count -= n;
+ for (i = 0; i < n; i++)
+ state->sg = sg_next(state->sg);
+ }
+
+ return 0;
+}
+
+static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
+ struct srp_request *req, struct scatterlist *scat,
+ int count)
+{
+ struct srp_target_port *target = ch->target;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(scat, sg, count, i) {
+ srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
+ target->global_rkey);
+ }
+
+ return 0;
+}
+
+/*
+ * Register the indirect data buffer descriptor with the HCA.
+ *
+ * Note: since the indirect data buffer descriptor has been allocated with
+ * kmalloc() it is guaranteed that this buffer is a physically contiguous
+ * memory buffer.
+ */
+static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
+ void **next_mr, void **end_mr, u32 idb_len,
+ __be32 *idb_rkey)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_device *dev = target->srp_host->srp_dev;
+ struct srp_map_state state;
+ struct srp_direct_buf idb_desc;
+ struct scatterlist idb_sg[1];
+ int ret;
+
+ memset(&state, 0, sizeof(state));
+ memset(&idb_desc, 0, sizeof(idb_desc));
+ state.gen.next = next_mr;
+ state.gen.end = end_mr;
+ state.desc = &idb_desc;
+ state.base_dma_addr = req->indirect_dma_addr;
+ state.dma_len = idb_len;
+
+ if (dev->use_fast_reg) {
+ state.sg = idb_sg;
+ sg_init_one(idb_sg, req->indirect_desc, idb_len);
+ idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ idb_sg->dma_length = idb_sg->length; /* hack^2 */
+#endif
+ ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
+ if (ret < 0)
+ return ret;
+ WARN_ON_ONCE(ret < 1);
+ } else {
+ return -EINVAL;
+ }
+
+ *idb_rkey = idb_desc.key;
+
+ return 0;
+}
+
+static void srp_check_mapping(struct srp_map_state *state,
+ struct srp_rdma_ch *ch, struct srp_request *req,
+ struct scatterlist *scat, int count)
+{
+ struct srp_device *dev = ch->target->srp_host->srp_dev;
+ struct srp_fr_desc **pfr;
+ u64 desc_len = 0, mr_len = 0;
+ int i;
+
+ for (i = 0; i < state->ndesc; i++)
+ desc_len += be32_to_cpu(req->indirect_desc[i].len);
+ if (dev->use_fast_reg)
+ for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
+ mr_len += (*pfr)->mr->length;
+ if (desc_len != scsi_bufflen(req->scmnd) ||
+ mr_len > scsi_bufflen(req->scmnd))
+ pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
+ scsi_bufflen(req->scmnd), desc_len, mr_len,
+ state->ndesc, state->nmdesc);
+}
+
+/**
+ * srp_map_data() - map SCSI data buffer onto an SRP request
+ * @scmnd: SCSI command to map
+ * @ch: SRP RDMA channel
+ * @req: SRP request
+ *
+ * Returns the length in bytes of the SRP_CMD IU or a negative value if
+ * mapping failed. The size of any immediate data is not included in the
+ * return value.
+ */
+static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
+ struct srp_request *req)
+{
+ struct srp_target_port *target = ch->target;
+ struct scatterlist *scat, *sg;
+ struct srp_cmd *cmd = req->cmd->buf;
+ int i, len, nents, count, ret;
+ struct srp_device *dev;
+ struct ib_device *ibdev;
+ struct srp_map_state state;
+ struct srp_indirect_buf *indirect_hdr;
+ u64 data_len;
+ u32 idb_len, table_len;
+ __be32 idb_rkey;
+ u8 fmt;
+
+ req->cmd->num_sge = 1;
+
+ if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
+ return sizeof(struct srp_cmd) + cmd->add_cdb_len;
+
+ if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
+ scmnd->sc_data_direction != DMA_TO_DEVICE) {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled data direction %d\n",
+ scmnd->sc_data_direction);
+ return -EINVAL;
+ }
+
+ nents = scsi_sg_count(scmnd);
+ scat = scsi_sglist(scmnd);
+ data_len = scsi_bufflen(scmnd);
+
+ dev = target->srp_host->srp_dev;
+ ibdev = dev->dev;
+
+ count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
+ if (unlikely(count == 0))
+ return -EIO;
+
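+ /*
+ * Use the SRP immediate data format if the entire buffer fits inside
+ * the SRP_CMD IU: this avoids memory registration and a target-side
+ * RDMA READ for small writes.
+ */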
+ if (ch->use_imm_data &&
+ count <= ch->max_imm_sge &&
+ SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
+ scmnd->sc_data_direction == DMA_TO_DEVICE) {
+ struct srp_imm_buf *buf;
+ struct ib_sge *sge = &req->cmd->sge[1];
+
+ fmt = SRP_DATA_DESC_IMM;
+ len = SRP_IMM_DATA_OFFSET;
+ req->nmdesc = 0;
+ buf = (void *)cmd->add_data + cmd->add_cdb_len;
+ buf->len = cpu_to_be32(data_len);
+ WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
+ for_each_sg(scat, sg, count, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = target->lkey;
+ }
+ req->cmd->num_sge += count;
+ goto map_complete;
+ }
+
+ fmt = SRP_DATA_DESC_DIRECT;
+ len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+ sizeof(struct srp_direct_buf);
+
+ if (count == 1 && target->global_rkey) {
+ /*
+ * The midlayer only generated a single gather/scatter
+ * entry, or DMA mapping coalesced everything to a
+ * single entry. So a direct descriptor along with
+ * the DMA MR suffices.
+ */
+ struct srp_direct_buf *buf;
+
+ buf = (void *)cmd->add_data + cmd->add_cdb_len;
+ buf->va = cpu_to_be64(sg_dma_address(scat));
+ buf->key = cpu_to_be32(target->global_rkey);
+ buf->len = cpu_to_be32(sg_dma_len(scat));
+
+ req->nmdesc = 0;
+ goto map_complete;
+ }
+
+ /*
+ * We have more than one scatter/gather entry, so build our indirect
+ * descriptor table, trying to merge as many entries as we can.
+ */
+ indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
+
+ ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
+ target->indirect_size, DMA_TO_DEVICE);
+
+ memset(&state, 0, sizeof(state));
+ state.desc = req->indirect_desc;
+ if (dev->use_fast_reg)
+ ret = srp_map_sg_fr(&state, ch, req, scat, count);
+ else
+ ret = srp_map_sg_dma(&state, ch, req, scat, count);
+ req->nmdesc = state.nmdesc;
+ if (ret < 0)
+ goto unmap;
+
+ {
+ DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
+ "Memory mapping consistency check");
+ if (DYNAMIC_DEBUG_BRANCH(ddm))
+ srp_check_mapping(&state, ch, req, scat, count);
+ }
+
+ /* We've mapped the request, now pull as much of the indirect
+ * descriptor table as we can into the command buffer. If this
+ * target is not using an external indirect table, we are
+ * guaranteed to fit into the command, as the SCSI layer won't
+ * give us more S/G entries than we allow.
+ */
+ if (state.ndesc == 1) {
+ /*
+ * Memory registration collapsed the sg-list into one entry,
+ * so use a direct descriptor.
+ */
+ struct srp_direct_buf *buf;
+
+ buf = (void *)cmd->add_data + cmd->add_cdb_len;
+ *buf = req->indirect_desc[0];
+ goto map_complete;
+ }
+
+ if (unlikely(target->cmd_sg_cnt < state.ndesc &&
+ !target->allow_ext_sg)) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Could not fit S/G list into SRP_CMD\n");
+ ret = -EIO;
+ goto unmap;
+ }
+
+ count = min(state.ndesc, target->cmd_sg_cnt);
+ table_len = state.ndesc * sizeof (struct srp_direct_buf);
+ idb_len = sizeof(struct srp_indirect_buf) + table_len;
+
+ fmt = SRP_DATA_DESC_INDIRECT;
+ len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
+ sizeof(struct srp_indirect_buf);
+ len += count * sizeof (struct srp_direct_buf);
+
+ memcpy(indirect_hdr->desc_list, req->indirect_desc,
+ count * sizeof (struct srp_direct_buf));
+
+ if (!target->global_rkey) {
+ ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
+ idb_len, &idb_rkey);
+ if (ret < 0)
+ goto unmap;
+ req->nmdesc++;
+ } else {
+ idb_rkey = cpu_to_be32(target->global_rkey);
+ }
+
+ indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
+ indirect_hdr->table_desc.key = idb_rkey;
+ indirect_hdr->table_desc.len = cpu_to_be32(table_len);
+ indirect_hdr->len = cpu_to_be32(state.total_len);
+
+ if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+ cmd->data_out_desc_cnt = count;
+ else
+ cmd->data_in_desc_cnt = count;
+
+ ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
+ DMA_TO_DEVICE);
+
+map_complete:
+ if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+ cmd->buf_fmt = fmt << 4;
+ else
+ cmd->buf_fmt = fmt;
+
+ return len;
+
+unmap:
+ srp_unmap_data(scmnd, ch, req);
+ if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
+ ret = -E2BIG;
+ return ret;
+}
+
+/*
+ * Return an IU, and possibly a credit, to the free pool
+ */
+static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
+ enum srp_iu_type iu_type)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ list_add(&iu->list, &ch->free_tx);
+ if (iu_type != SRP_IU_RSP)
+ ++ch->req_lim;
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+/*
+ * Must be called with ch->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ * more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ * one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
+ enum srp_iu_type iu_type)
+{
+ struct srp_target_port *target = ch->target;
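+ /*
+ * Reserve SRP_TSK_MGMT_SQ_SIZE credits for task management IUs so
+ * that regular SCSI commands cannot starve them.
+ */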
+ s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+ struct srp_iu *iu;
+
+ lockdep_assert_held(&ch->lock);
+
+ ib_process_cq_direct(ch->send_cq, -1);
+
+ if (list_empty(&ch->free_tx))
+ return NULL;
+
+ /* Initiator responses to target requests do not consume credits */
+ if (iu_type != SRP_IU_RSP) {
+ if (ch->req_lim <= rsv) {
+ ++target->zero_req_lim;
+ return NULL;
+ }
+
+ --ch->req_lim;
+ }
+
+ iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
+ list_del(&iu->list);
+ return iu;
+}
+
+/*
+ * Note: if this function is called from inside ib_drain_sq() then it will
+ * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
+ * with status IB_WC_SUCCESS then that's a bug.
+ */
+static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
+ struct srp_rdma_ch *ch = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ srp_handle_qp_err(cq, wc, "SEND");
+ return;
+ }
+
+ lockdep_assert_held(&ch->lock);
+
+ list_add(&iu->list, &ch->free_tx);
+}
+
+/**
+ * srp_post_send() - send an SRP information unit
+ * @ch: RDMA channel over which to send the information unit.
+ * @iu: Information unit to send.
+ * @len: Length of the information unit excluding immediate data.
+ */
+static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
+{
+ struct srp_target_port *target = ch->target;
+ struct ib_send_wr wr;
+
+ if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
+ return -EINVAL;
+
+ iu->sge[0].addr = iu->dma;
+ iu->sge[0].length = len;
+ iu->sge[0].lkey = target->lkey;
+
+ iu->cqe.done = srp_send_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &iu->cqe;
+ wr.sg_list = &iu->sge[0];
+ wr.num_sge = iu->num_sge;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ return ib_post_send(ch->qp, &wr, NULL);
+}
+
+static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
+{
+ struct srp_target_port *target = ch->target;
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+
+ list.addr = iu->dma;
+ list.length = iu->size;
+ list.lkey = target->lkey;
+
+ iu->cqe.done = srp_recv_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &iu->cqe;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ return ib_post_recv(ch->qp, &wr, NULL);
+}
+
+static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_request *req;
+ struct scsi_cmnd *scmnd;
+ unsigned long flags;
+
+ if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
+ if (rsp->tag == ch->tsk_mgmt_tag) {
+ ch->tsk_mgmt_status = -1;
+ if (be32_to_cpu(rsp->resp_data_len) >= 4)
+ ch->tsk_mgmt_status = rsp->data[3];
+ complete(&ch->tsk_mgmt_done);
+ } else {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Received tsk mgmt response too late for tag %#llx\n",
+ rsp->tag);
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ } else {
+ scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+ if (scmnd) {
+ req = scsi_cmd_priv(scmnd);
+ scmnd = srp_claim_req(ch, req, NULL, scmnd);
+ }
+ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
+ rsp->tag, ch - target->ch, ch->qp->qp_num);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return;
+ }
+ scmnd->result = rsp->status;
+
+ if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
+ memcpy(scmnd->sense_buffer, rsp->data +
+ be32_to_cpu(rsp->resp_data_len),
+ min_t(int, be32_to_cpu(rsp->sense_data_len),
+ SCSI_SENSE_BUFFERSIZE));
+ }
+
+ if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+ else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
+
+ srp_free_req(ch, req, scmnd,
+ be32_to_cpu(rsp->req_lim_delta));
+
+ scsi_done(scmnd);
+ }
+}
+
+static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
+ void *rsp, int len)
+{
+ struct srp_target_port *target = ch->target;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
+ unsigned long flags;
+ struct srp_iu *iu;
+ int err;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->req_lim += req_delta;
+ iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!iu) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "no IU available to send response\n");
+ return 1;
+ }
+
+ iu->num_sge = 1;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+ memcpy(iu->buf, rsp, len);
+ ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+ err = srp_post_send(ch, iu, len);
+ if (err) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "unable to post response: %d\n", err);
+ srp_put_tx_iu(ch, iu, SRP_IU_RSP);
+ }
+
+ return err;
+}
+
+static void srp_process_cred_req(struct srp_rdma_ch *ch,
+ struct srp_cred_req *req)
+{
+ struct srp_cred_rsp rsp = {
+ .opcode = SRP_CRED_RSP,
+ .tag = req->tag,
+ };
+ s32 delta = be32_to_cpu(req->req_lim_delta);
+
+ if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
+ shost_printk(KERN_ERR, ch->target->scsi_host, PFX
+ "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_rdma_ch *ch,
+ struct srp_aer_req *req)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_aer_rsp rsp = {
+ .opcode = SRP_AER_RSP,
+ .tag = req->tag,
+ };
+ s32 delta = be32_to_cpu(req->req_lim_delta);
+
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
+
+ if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "problems processing SRP_AER_REQ\n");
+}
+
+static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
+ struct srp_rdma_ch *ch = cq->cq_context;
+ struct srp_target_port *target = ch->target;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
+ int res;
+ u8 opcode;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ srp_handle_qp_err(cq, wc, "RECV");
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
+ DMA_FROM_DEVICE);
+
+ opcode = *(u8 *) iu->buf;
+
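+ /* Debug aid: change "if (0)" into "if (1)" to hex-dump every received IU. */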
+ if (0) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "recv completion, opcode 0x%02x\n", opcode);
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
+ iu->buf, wc->byte_len, true);
+ }
+
+ switch (opcode) {
+ case SRP_RSP:
+ srp_process_rsp(ch, iu->buf);
+ break;
+
+ case SRP_CRED_REQ:
+ srp_process_cred_req(ch, iu->buf);
+ break;
+
+ case SRP_AER_REQ:
+ srp_process_aer_req(ch, iu->buf);
+ break;
+
+ case SRP_T_LOGOUT:
+ /* XXX Handle target logout */
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Got target logout request\n");
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled SRP opcode 0x%02x\n", opcode);
+ break;
+ }
+
+ ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
+ DMA_FROM_DEVICE);
+
+ res = srp_post_recv(ch, iu);
+ if (res != 0)
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Recv failed with error code %d\n", res);
+}
+
+/**
+ * srp_tl_err_work() - handle a transport layer error
+ * @work: Work structure embedded in an SRP target port.
+ *
+ * Note: This function may get invoked before the rport has been created,
+ * hence the target->rport test.
+ */
+static void srp_tl_err_work(struct work_struct *work)
+{
+ struct srp_target_port *target;
+
+ target = container_of(work, struct srp_target_port, tl_err_work);
+ if (target->rport)
+ srp_start_tl_fail_timers(target->rport);
+}
+
+static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+ const char *opname)
+{
+ struct srp_rdma_ch *ch = cq->cq_context;
+ struct srp_target_port *target = ch->target;
+
+ if (ch->connected && !target->qp_in_error) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed %s status %s (%d) for CQE %p\n",
+ opname, ib_wc_status_msg(wc->status), wc->status,
+ wc->wr_cqe);
+ queue_work(system_long_wq, &target->tl_err_work);
+ }
+ target->qp_in_error = true;
+}
+
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+{
+ struct request *rq = scsi_cmd_to_rq(scmnd);
+ struct srp_target_port *target = host_to_target(shost);
+ struct srp_rdma_ch *ch;
+ struct srp_request *req = scsi_cmd_priv(scmnd);
+ struct srp_iu *iu;
+ struct srp_cmd *cmd;
+ struct ib_device *dev;
+ unsigned long flags;
+ u32 tag;
+ int len, ret;
+
+ scmnd->result = srp_chkready(target->rport);
+ if (unlikely(scmnd->result))
+ goto err;
+
+ WARN_ON_ONCE(rq->tag < 0);
+ tag = blk_mq_unique_tag(rq);
+ ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
+
+ spin_lock_irqsave(&ch->lock, flags);
+ iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!iu)
+ goto err;
+
+ dev = target->srp_host->srp_dev->dev;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
+ DMA_TO_DEVICE);
+
+ cmd = iu->buf;
+ memset(cmd, 0, sizeof *cmd);
+
+ cmd->opcode = SRP_CMD;
+ int_to_scsilun(scmnd->device->lun, &cmd->lun);
+ cmd->tag = tag;
+ memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
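+ /*
+ * CDBs longer than the fixed CDB field are continued in the
+ * additional CDB area, whose length must be a multiple of four
+ * bytes.
+ */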
+ if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
+ cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
+ 4);
+ if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
+ goto err_iu;
+ }
+
+ req->scmnd = scmnd;
+ req->cmd = iu;
+
+ len = srp_map_data(scmnd, ch, req);
+ if (len < 0) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Failed to map data (%d)\n", len);
+ /*
+ * If we ran out of memory descriptors (-ENOMEM) because an
+ * application is queuing many requests with more than
+ * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
+ * to reduce queue depth temporarily.
+ */
+ scmnd->result = len == -ENOMEM ?
+ DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
+ goto err_iu;
+ }
+
+ ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
+ DMA_TO_DEVICE);
+
+ if (srp_post_send(ch, iu, len)) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
+ scmnd->result = DID_ERROR << 16;
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ srp_unmap_data(scmnd, ch, req);
+
+err_iu:
+ srp_put_tx_iu(ch, iu, SRP_IU_CMD);
+
+ /*
+ * Prevent the loops that iterate over the request ring from
+ * encountering a dangling SCSI command pointer.
+ */
+ req->scmnd = NULL;
+
+err:
+ if (scmnd->result) {
+ scsi_done(scmnd);
+ ret = 0;
+ } else {
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ return ret;
+}
+
+/*
+ * Note: the resources allocated in this function are freed in
+ * srp_free_ch_ib().
+ */
+static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int i;
+
+ ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
+ GFP_KERNEL);
+ if (!ch->rx_ring)
+ goto err_no_ring;
+ ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
+ GFP_KERNEL);
+ if (!ch->tx_ring)
+ goto err_no_ring;
+
+ for (i = 0; i < target->queue_size; ++i) {
+ ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
+ ch->max_ti_iu_len,
+ GFP_KERNEL, DMA_FROM_DEVICE);
+ if (!ch->rx_ring[i])
+ goto err;
+ }
+
+ for (i = 0; i < target->queue_size; ++i) {
+ ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
+ ch->max_it_iu_len,
+ GFP_KERNEL, DMA_TO_DEVICE);
+ if (!ch->tx_ring[i])
+ goto err;
+
+ list_add(&ch->tx_ring[i]->list, &ch->free_tx);
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < target->queue_size; ++i) {
+ srp_free_iu(target->srp_host, ch->rx_ring[i]);
+ srp_free_iu(target->srp_host, ch->tx_ring[i]);
+ }
+
+err_no_ring:
+ kfree(ch->tx_ring);
+ ch->tx_ring = NULL;
+ kfree(ch->rx_ring);
+ ch->rx_ring = NULL;
+
+ return -ENOMEM;
+}
+
+static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
+{
+ uint64_t T_tr_ns, max_compl_time_ms;
+ uint32_t rq_tmo_jiffies;
+
+ /*
+ * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
+ * table 91), both the QP timeout and the retry count have to be set
+ * for RC QP's during the RTR to RTS transition.
+ */
+ WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
+ (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
+
+ /*
+ * Set target->rq_tmo_jiffies to one second more than the largest time
+ * it can take before an error completion is generated. See also
+ * C9-140..142 in the IBTA spec for more information about how to
+ * convert the QP Local ACK Timeout value to nanoseconds.
+ */
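+ /*
+ * T_tr_ns is the transport timer timeout in nanoseconds (4.096 us *
+ * 2^timeout). max_compl_time_ms holds nanoseconds until do_div()
+ * converts it to milliseconds.
+ */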
+ T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
+ max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
+ do_div(max_compl_time_ms, NSEC_PER_MSEC);
+ rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
+
+ return rq_tmo_jiffies;
+}
+
+static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
+ const struct srp_login_rsp *lrsp,
+ struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct ib_qp_attr *qp_attr = NULL;
+ int attr_mask = 0;
+ int ret = 0;
+ int i;
+
+ if (lrsp->opcode == SRP_LOGIN_RSP) {
+ ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
+ ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
+ ch->use_imm_data = srp_use_imm_data &&
+ (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
+ ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ ch->use_imm_data,
+ target->max_it_iu_size);
+ WARN_ON_ONCE(ch->max_it_iu_len >
+ be32_to_cpu(lrsp->max_it_iu_len));
+
+ if (ch->use_imm_data)
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "using immediate data\n");
+
+ /*
+ * Reserve credits for task management so we don't
+ * bounce requests back to the SCSI mid-layer.
+ */
+ target->scsi_host->can_queue
+ = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+ target->scsi_host->can_queue);
+ target->scsi_host->cmd_per_lun
+ = min_t(int, target->scsi_host->can_queue,
+ target->scsi_host->cmd_per_lun);
+ } else {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
+ ret = -ECONNRESET;
+ goto error;
+ }
+
+ if (!ch->rx_ring) {
+ ret = srp_alloc_iu_bufs(ch);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < target->queue_size; i++) {
+ struct srp_iu *iu = ch->rx_ring[i];
+
+ ret = srp_post_recv(ch, iu);
+ if (ret)
+ goto error;
+ }
+
+ if (!target->using_rdma_cm) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto error;
+
+ qp_attr->qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
+
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
+
+ qp_attr->qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
+
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ }
+
+error_free:
+ kfree(qp_attr);
+
+error:
+ ch->status = ret;
+}
+
+static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event,
+ struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct Scsi_Host *shost = target->scsi_host;
+ struct ib_class_port_info *cpi;
+ int opcode;
+ u16 dlid;
+
+ switch (event->param.rej_rcvd.reason) {
+ case IB_CM_REJ_PORT_CM_REDIRECT:
+ cpi = event->param.rej_rcvd.ari;
+ dlid = be16_to_cpu(cpi->redirect_lid);
+ sa_path_set_dlid(&ch->ib_cm.path, dlid);
+ ch->ib_cm.path.pkey = cpi->redirect_pkey;
+ cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
+ memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
+
+ ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
+ break;
+
+ case IB_CM_REJ_PORT_REDIRECT:
+ if (srp_target_is_topspin(target)) {
+ union ib_gid *dgid = &ch->ib_cm.path.dgid;
+
+ /*
+ * Topspin/Cisco SRP gateways incorrectly send
+ * reject reason code 25 when they mean 24
+ * (port redirect).
+ */
+ memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
+
+ shost_printk(KERN_DEBUG, shost,
+ PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
+ be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
+
+ ch->status = SRP_PORT_REDIRECT;
+ } else {
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
+ ch->status = -ECONNRESET;
+ }
+ break;
+
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej = event->private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ shost_printk(KERN_WARNING, shost, PFX
+ "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
+ target->sgid.raw,
+ target->ib_cm.orig_dgid.raw,
+ reason);
+ } else
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
+ " opcode 0x%02x\n", opcode);
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
+ ch->status = SRP_STALE_CONN;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
+ event->param.rej_rcvd.reason);
+ ch->status = -ECONNRESET;
+ }
+}
+
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct srp_rdma_ch *ch = cm_id->context;
+ struct srp_target_port *target = ch->target;
+ int comp = 0;
+
+ switch (event->event) {
+ case IB_CM_REQ_ERROR:
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM REQ failed\n");
+ comp = 1;
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REP_RECEIVED:
+ comp = 1;
+ srp_cm_rep_handler(cm_id, event->private_data, ch);
+ break;
+
+ case IB_CM_REJ_RECEIVED:
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ comp = 1;
+
+ srp_ib_cm_rej_handler(cm_id, event, ch);
+ break;
+
+ case IB_CM_DREQ_RECEIVED:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "DREQ received - connection closed\n");
+ ch->connected = false;
+ if (ib_send_cm_drep(cm_id, NULL, 0))
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Sending CM DREP failed\n");
+ queue_work(system_long_wq, &target->tl_err_work);
+ break;
+
+ case IB_CM_TIMEWAIT_EXIT:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "connection closed\n");
+ comp = 1;
+
+ ch->status = 0;
+ break;
+
+ case IB_CM_MRA_RECEIVED:
+ case IB_CM_DREQ_ERROR:
+ case IB_CM_DREP_RECEIVED:
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&ch->done);
+
+ return 0;
+}
+
+static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
+ struct rdma_cm_event *event)
+{
+ struct srp_target_port *target = ch->target;
+ struct Scsi_Host *shost = target->scsi_host;
+ int opcode;
+
+ switch (event->status) {
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->param.conn.private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej =
+ (struct srp_login_rej *)
+ event->param.conn.private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ } else {
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
+ opcode);
+ }
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: stale connection\n");
+ ch->status = SRP_STALE_CONN;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
+ event->status);
+ ch->status = -ECONNRESET;
+ break;
+ }
+}
+
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srp_rdma_ch *ch = cm_id->context;
+ struct srp_target_port *target = ch->target;
+ int comp = 0;
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ ch->status = -ENXIO;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ ch->status = -EHOSTUNREACH;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM REQ failed\n");
+ comp = 1;
+ ch->status = -ECONNRESET;
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ comp = 1;
+ srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
+ break;
+
+ case RDMA_CM_EVENT_REJECTED:
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ comp = 1;
+
+ srp_rdma_cm_rej_handler(ch, event);
+ break;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (ch->connected) {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "received DREQ\n");
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ comp = 1;
+ ch->status = 0;
+ queue_work(system_long_wq, &target->tl_err_work);
+ }
+ break;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "connection closed\n");
+
+ comp = 1;
+ ch->status = 0;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&ch->done);
+
+ return 0;
+}
+
+/**
+ * srp_change_queue_depth() - set the device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Returns queue depth.
+ */
+static int
+srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (!sdev->tagged_supported)
+ qdepth = 1;
+ return scsi_change_queue_depth(sdev, qdepth);
+}
+
+static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+ u8 func, u8 *status)
+{
+ struct srp_target_port *target = ch->target;
+ struct srp_rport *rport = target->rport;
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
+ struct srp_iu *iu;
+ struct srp_tsk_mgmt *tsk_mgmt;
+ int res;
+
+ if (!ch->connected || target->qp_in_error)
+ return -1;
+
+ /*
+ * Lock the rport mutex to prevent srp_create_ch_ib() from being
+ * invoked while a task management function is being sent.
+ */
+ mutex_lock(&rport->mutex);
+ spin_lock_irq(&ch->lock);
+ iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
+ spin_unlock_irq(&ch->lock);
+
+ if (!iu) {
+ mutex_unlock(&rport->mutex);
+
+ return -1;
+ }
+
+ iu->num_sge = 1;
+
+ ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
+ tsk_mgmt = iu->buf;
+ memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
+
+ tsk_mgmt->opcode = SRP_TSK_MGMT;
+ int_to_scsilun(lun, &tsk_mgmt->lun);
+ tsk_mgmt->tsk_mgmt_func = func;
+ tsk_mgmt->task_tag = req_tag;
+
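+ /*
+ * Task management tags have bit 31 (SRP_TAG_TSK_MGMT) set so that
+ * responses to task management requests can be told apart from
+ * responses to blk-mq tagged SCSI commands.
+ */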
+ spin_lock_irq(&ch->lock);
+ ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
+ tsk_mgmt->tag = ch->tsk_mgmt_tag;
+ spin_unlock_irq(&ch->lock);
+
+ init_completion(&ch->tsk_mgmt_done);
+
+ ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
+ if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
+ srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
+ mutex_unlock(&rport->mutex);
+
+ return -1;
+ }
+ res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
+ msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
+ if (res > 0 && status)
+ *status = ch->tsk_mgmt_status;
+ mutex_unlock(&rport->mutex);
+
+ WARN_ON_ONCE(res < 0);
+
+ return res > 0 ? 0 : -1;
+}
+
+static int srp_abort(struct scsi_cmnd *scmnd)
+{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req = scsi_cmd_priv(scmnd);
+ u32 tag;
+ u16 ch_idx;
+ struct srp_rdma_ch *ch;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+
+ tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
+ ch_idx = blk_mq_unique_tag_to_hwq(tag);
+ if (WARN_ON_ONCE(ch_idx >= target->ch_count))
+ return SUCCESS;
+ ch = &target->ch[ch_idx];
+ if (!srp_claim_req(ch, req, NULL, scmnd))
+ return SUCCESS;
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Sending SRP abort for tag %#x\n", tag);
+ if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
+ SRP_TSK_ABORT_TASK, NULL) == 0) {
+ srp_free_req(ch, req, scmnd, 0);
+ return SUCCESS;
+ }
+ if (target->rport->state == SRP_RPORT_LOST)
+ return FAST_IO_FAIL;
+
+ return FAILED;
+}
+
+static int srp_reset_device(struct scsi_cmnd *scmnd)
+{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_rdma_ch *ch;
+ u8 status;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
+
+ ch = &target->ch[0];
+ if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
+ SRP_TSK_LUN_RESET, &status))
+ return FAILED;
+ if (status)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+static int srp_reset_host(struct scsi_cmnd *scmnd)
+{
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+
+ shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
+
+ return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
+}
+
+static int srp_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct srp_target_port *target = host_to_target(shost);
+
+ if (target->target_can_queue)
+ starget->can_queue = target->target_can_queue;
+ return 0;
+}
+
+static int srp_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_target_port *target = host_to_target(shost);
+ struct request_queue *q = sdev->request_queue;
+ unsigned long timeout;
+
+ if (sdev->type == TYPE_DISK) {
+ timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
+ blk_queue_rq_timeout(q, timeout);
+ }
+
+ return 0;
+}
+
+static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
+}
+
+static DEVICE_ATTR_RO(id_ext);
+
+static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
+}
+
+static DEVICE_ATTR_RO(ioc_guid);
+
+static ssize_t service_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sysfs_emit(buf, "0x%016llx\n",
+ be64_to_cpu(target->ib_cm.service_id));
+}
+
+static DEVICE_ATTR_RO(service_id);
+
+static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ if (target->using_rdma_cm)
+ return -ENOENT;
+
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
+}
+
+static DEVICE_ATTR_RO(pkey);
+
+static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
+}
+
+static DEVICE_ATTR_RO(sgid);
+
+static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+ struct srp_rdma_ch *ch = &target->ch[0];
+
+ if (target->using_rdma_cm)
+ return -ENOENT;
+
+ return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
+}
+
+static DEVICE_ATTR_RO(dgid);
+
+static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ if (target->using_rdma_cm)
+ return -ENOENT;
+
+ return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
+}
+
+static DEVICE_ATTR_RO(orig_dgid);
+
+static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+ struct srp_rdma_ch *ch;
+ int i, req_lim = INT_MAX;
+
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ req_lim = min(req_lim, ch->req_lim);
+ }
+
+ return sysfs_emit(buf, "%d\n", req_lim);
+}
+
+static DEVICE_ATTR_RO(req_lim);
+
+static ssize_t zero_req_lim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%d\n", target->zero_req_lim);
+}
+
+static DEVICE_ATTR_RO(zero_req_lim);
+
+static ssize_t local_ib_port_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%u\n", target->srp_host->port);
+}
+
+static DEVICE_ATTR_RO(local_ib_port);
+
+static ssize_t local_ib_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%s\n",
+ dev_name(&target->srp_host->srp_dev->dev->dev));
+}
+
+static DEVICE_ATTR_RO(local_ib_device);
+
+static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%d\n", target->ch_count);
+}
+
+static DEVICE_ATTR_RO(ch_count);
+
+static ssize_t comp_vector_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%d\n", target->comp_vector);
+}
+
+static DEVICE_ATTR_RO(comp_vector);
+
+static ssize_t tl_retry_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%d\n", target->tl_retry_count);
+}
+
+static DEVICE_ATTR_RO(tl_retry_count);
+
+static ssize_t cmd_sg_entries_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
+}
+
+static DEVICE_ATTR_RO(cmd_sg_entries);
+
+static ssize_t allow_ext_sg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+ return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
+}
+
+static DEVICE_ATTR_RO(allow_ext_sg);
+
+static struct attribute *srp_host_attrs[] = {
+ &dev_attr_id_ext.attr,
+ &dev_attr_ioc_guid.attr,
+ &dev_attr_service_id.attr,
+ &dev_attr_pkey.attr,
+ &dev_attr_sgid.attr,
+ &dev_attr_dgid.attr,
+ &dev_attr_orig_dgid.attr,
+ &dev_attr_req_lim.attr,
+ &dev_attr_zero_req_lim.attr,
+ &dev_attr_local_ib_port.attr,
+ &dev_attr_local_ib_device.attr,
+ &dev_attr_ch_count.attr,
+ &dev_attr_comp_vector.attr,
+ &dev_attr_tl_retry_count.attr,
+ &dev_attr_cmd_sg_entries.attr,
+ &dev_attr_allow_ext_sg.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(srp_host);
+
+static struct scsi_host_template srp_template = {
+ .module = THIS_MODULE,
+ .name = "InfiniBand SRP initiator",
+ .proc_name = DRV_NAME,
+ .target_alloc = srp_target_alloc,
+ .slave_configure = srp_slave_configure,
+ .info = srp_target_info,
+ .init_cmd_priv = srp_init_cmd_priv,
+ .exit_cmd_priv = srp_exit_cmd_priv,
+ .queuecommand = srp_queuecommand,
+ .change_queue_depth = srp_change_queue_depth,
+ .eh_timed_out = srp_timed_out,
+ .eh_abort_handler = srp_abort,
+ .eh_device_reset_handler = srp_reset_device,
+ .eh_host_reset_handler = srp_reset_host,
+ .skip_settle_delay = true,
+ .sg_tablesize = SRP_DEF_SG_TABLESIZE,
+ .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
+ .this_id = -1,
+ .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
+ .shost_groups = srp_host_groups,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct srp_request),
+};
+
+static int srp_sdev_count(struct Scsi_Host *host)
+{
+ struct scsi_device *sdev;
+ int c = 0;
+
+ shost_for_each_device(sdev, host)
+ c++;
+
+ return c;
+}
+
+/*
+ * Return values:
+ * < 0 upon failure. Caller is responsible for SRP target port cleanup.
+ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
+ * removal has been scheduled.
+ * 0 and target->state != SRP_TARGET_REMOVED upon success.
+ */
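+/*
+ * Minimal caller-side sketch of this contract (cf. the real caller,
+ * add_target_store(), further down in this file):
+ *
+ * ret = srp_add_target(host, target);
+ * if (ret)
+ * goto err_disconnect;
+ * if (target->state != SRP_TARGET_REMOVED)
+ * shost_printk(KERN_DEBUG, target->scsi_host, "new target\n");
+ */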
+static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+{
+ struct srp_rport_identifiers ids;
+ struct srp_rport *rport;
+
+ target->state = SRP_TARGET_SCANNING;
+ sprintf(target->target_name, "SRP.T10:%016llX",
+ be64_to_cpu(target->id_ext));
+
+ if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
+ return -ENODEV;
+
+ memcpy(ids.port_id, &target->id_ext, 8);
+ memcpy(ids.port_id + 8, &target->ioc_guid, 8);
+ ids.roles = SRP_RPORT_ROLE_TARGET;
+ rport = srp_rport_add(target->scsi_host, &ids);
+ if (IS_ERR(rport)) {
+ scsi_remove_host(target->scsi_host);
+ return PTR_ERR(rport);
+ }
+
+ rport->lld_data = target;
+ target->rport = rport;
+
+ spin_lock(&host->target_lock);
+ list_add_tail(&target->list, &host->target_list);
+ spin_unlock(&host->target_lock);
+
+ scsi_scan_target(&target->scsi_host->shost_gendev,
+ 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+
+ if (srp_connected_ch(target) < target->ch_count ||
+ target->qp_in_error) {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "SCSI scan failed - removing SCSI host\n");
+ srp_queue_remove_work(target);
+ goto out;
+ }
+
+ pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
+ dev_name(&target->scsi_host->shost_gendev),
+ srp_sdev_count(target->scsi_host));
+
+ spin_lock_irq(&target->lock);
+ if (target->state == SRP_TARGET_SCANNING)
+ target->state = SRP_TARGET_LIVE;
+ spin_unlock_irq(&target->lock);
+
+out:
+ return 0;
+}
+
+static void srp_release_dev(struct device *dev)
+{
+ struct srp_host *host =
+ container_of(dev, struct srp_host, dev);
+
+ kfree(host);
+}
+
+static struct attribute *srp_class_attrs[];
+
+ATTRIBUTE_GROUPS(srp_class);
+
+static struct class srp_class = {
+ .name = "infiniband_srp",
+ .dev_groups = srp_class_groups,
+ .dev_release = srp_release_dev
+};
+
+/**
+ * srp_conn_unique() - check whether the connection to a target is unique
+ * @host: SRP host.
+ * @target: SRP target port.
+ */
+static bool srp_conn_unique(struct srp_host *host,
+ struct srp_target_port *target)
+{
+ struct srp_target_port *t;
+ bool ret = false;
+
+ if (target->state == SRP_TARGET_REMOVED)
+ goto out;
+
+ ret = true;
+
+ spin_lock(&host->target_lock);
+ list_for_each_entry(t, &host->target_list, list) {
+ if (t != target &&
+ target->id_ext == t->id_ext &&
+ target->ioc_guid == t->ioc_guid &&
+ target->initiator_ext == t->initiator_ext) {
+ ret = false;
+ break;
+ }
+ }
+ spin_unlock(&host->target_lock);
+
+out:
+ return ret;
+}
+
+/*
+ * Target ports are added by writing
+ *
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
+ * pkey=<P_Key>,service_id=<service ID>
+ * or
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
+ * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
+ *
+ * to the add_target sysfs attribute.
+ */
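+/*
+ * Example with the RDMA/CM format (all identifier values below are
+ * hypothetical; the sysfs path follows the "srp-<device>-<port>" naming
+ * used by srp_add_port() later in this file):
+ *
+ *   echo "id_ext=0002c90300a1b2c3,ioc_guid=0002c90300a1b2c3,dest=192.168.1.7:5555" \
+ *     > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
+ */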
+enum {
+ SRP_OPT_ERR = 0,
+ SRP_OPT_ID_EXT = 1 << 0,
+ SRP_OPT_IOC_GUID = 1 << 1,
+ SRP_OPT_DGID = 1 << 2,
+ SRP_OPT_PKEY = 1 << 3,
+ SRP_OPT_SERVICE_ID = 1 << 4,
+ SRP_OPT_MAX_SECT = 1 << 5,
+ SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
+ SRP_OPT_IO_CLASS = 1 << 7,
+ SRP_OPT_INITIATOR_EXT = 1 << 8,
+ SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
+ SRP_OPT_ALLOW_EXT_SG = 1 << 10,
+ SRP_OPT_SG_TABLESIZE = 1 << 11,
+ SRP_OPT_COMP_VECTOR = 1 << 12,
+ SRP_OPT_TL_RETRY_COUNT = 1 << 13,
+ SRP_OPT_QUEUE_SIZE = 1 << 14,
+ SRP_OPT_IP_SRC = 1 << 15,
+ SRP_OPT_IP_DEST = 1 << 16,
+ SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+ SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
+ SRP_OPT_CH_COUNT = 1 << 19,
+};
+
+static unsigned int srp_opt_mandatory[] = {
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_DGID |
+ SRP_OPT_PKEY |
+ SRP_OPT_SERVICE_ID,
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_IP_DEST,
+};
+
+static const match_table_t srp_opt_tokens = {
+ { SRP_OPT_ID_EXT, "id_ext=%s" },
+ { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
+ { SRP_OPT_DGID, "dgid=%s" },
+ { SRP_OPT_PKEY, "pkey=%x" },
+ { SRP_OPT_SERVICE_ID, "service_id=%s" },
+ { SRP_OPT_MAX_SECT, "max_sect=%d" },
+ { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
+ { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
+ { SRP_OPT_IO_CLASS, "io_class=%x" },
+ { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
+ { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
+ { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
+ { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
+ { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
+ { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
+ { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { SRP_OPT_IP_SRC, "src=%s" },
+ { SRP_OPT_IP_DEST, "dest=%s" },
+ { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
+ { SRP_OPT_CH_COUNT, "ch_count=%u", },
+ { SRP_OPT_ERR, NULL }
+};
+
+/**
+ * srp_parse_in - parse an IP address and port number combination
+ * @net: [in] Network namespace.
+ * @sa: [out] Address family, IP address and port number.
+ * @addr_port_str: [in] IP address and port number.
+ * @has_port: [out] Whether or not @addr_port_str includes a port number.
+ *
+ * Parse the following address formats:
+ * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
+ * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
+ */
+static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+ const char *addr_port_str, bool *has_port)
+{
+ char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
+ char *port_str;
+ int ret;
+
+ if (!addr)
+ return -ENOMEM;
+ port_str = strrchr(addr, ':');
+ if (port_str && strchr(port_str, ']'))
+ port_str = NULL;
+ if (port_str)
+ *port_str++ = '\0';
+ if (has_port)
+ *has_port = port_str != NULL;
+ ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
+ if (ret && addr[0]) {
+ addr_end = addr + strlen(addr) - 1;
+ if (addr[0] == '[' && *addr_end == ']') {
+ *addr_end = '\0';
+ ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
+ port_str, sa);
+ }
+ }
+ kfree(addr);
+ pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
+ return ret;
+}
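+/*
+ * Examples of strings accepted by srp_parse_in() (hypothetical addresses):
+ * - "192.168.1.7:5555" -> AF_INET, port 5555, *has_port == true;
+ * - "192.168.1.7" -> AF_INET, no port, *has_port == false;
+ * - "[fe80::1%2]:5555" -> AF_INET6 with scope ID 2, port 5555.
+ */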
+
+static int srp_parse_options(struct net *net, const char *buf,
+ struct srp_target_port *target)
+{
+ char *options, *sep_opt;
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long long ull;
+ bool has_port;
+ int opt_mask = 0;
+ int token;
+ int ret = -EINVAL;
+ int i;
+
+ options = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ sep_opt = options;
+ while ((p = strsep(&sep_opt, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, srp_opt_tokens, args);
+ opt_mask |= token;
+
+ switch (token) {
+ case SRP_OPT_ID_EXT:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid id_ext parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->id_ext = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_IOC_GUID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid ioc_guid parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ioc_guid = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_DGID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) != 32) {
+ pr_warn("bad dest GID parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+
+ ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
+ kfree(p);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case SRP_OPT_PKEY:
+ ret = match_hex(args, &token);
+ if (ret) {
+ pr_warn("bad P_Key parameter '%s'\n", p);
+ goto out;
+ }
+ target->ib_cm.pkey = cpu_to_be16(token);
+ break;
+
+ case SRP_OPT_SERVICE_ID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad service_id parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ib_cm.service_id = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_SRC:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
+ NULL);
+ if (ret < 0) {
+ pr_warn("bad source parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->rdma_cm.src_specified = true;
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_DEST:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
+ &has_port);
+ if (!has_port)
+ ret = -EINVAL;
+ if (ret < 0) {
+ pr_warn("bad dest parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->using_rdma_cm = true;
+ kfree(p);
+ break;
+
+ case SRP_OPT_MAX_SECT:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("bad max sect parameter '%s'\n", p);
+ goto out;
+ }
+ target->scsi_host->max_sectors = token;
+ break;
+
+ case SRP_OPT_QUEUE_SIZE:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 1) {
+ pr_warn("bad queue_size parameter '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->scsi_host->can_queue = token;
+ target->queue_size = token + SRP_RSP_SQ_SIZE +
+ SRP_TSK_MGMT_SQ_SIZE;
+ if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+ target->scsi_host->cmd_per_lun = token;
+ break;
+
+ case SRP_OPT_MAX_CMD_PER_LUN:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 1) {
+ pr_warn("bad max cmd_per_lun parameter '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->scsi_host->cmd_per_lun = token;
+ break;
+
+ case SRP_OPT_TARGET_CAN_QUEUE:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 1) {
+ pr_warn("bad max target_can_queue parameter '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->target_can_queue = token;
+ break;
+
+ case SRP_OPT_IO_CLASS:
+ ret = match_hex(args, &token);
+ if (ret) {
+ pr_warn("bad IO class parameter '%s'\n", p);
+ goto out;
+ }
+ if (token != SRP_REV10_IB_IO_CLASS &&
+ token != SRP_REV16A_IB_IO_CLASS) {
+ pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
+ token, SRP_REV10_IB_IO_CLASS,
+ SRP_REV16A_IB_IO_CLASS);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->io_class = token;
+ break;
+
+ case SRP_OPT_INITIATOR_EXT:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad initiator_ext value '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->initiator_ext = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_CMD_SG_ENTRIES:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 1 || token > 255) {
+ pr_warn("bad max cmd_sg_entries parameter '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->cmd_sg_cnt = token;
+ break;
+
+ case SRP_OPT_ALLOW_EXT_SG:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("bad allow_ext_sg parameter '%s'\n", p);
+ goto out;
+ }
+ target->allow_ext_sg = !!token;
+ break;
+
+ case SRP_OPT_SG_TABLESIZE:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 1 || token > SG_MAX_SEGMENTS) {
+ pr_warn("bad max sg_tablesize parameter '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->sg_tablesize = token;
+ break;
+
+ case SRP_OPT_COMP_VECTOR:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 0) {
+ pr_warn("bad comp_vector parameter '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->comp_vector = token;
+ break;
+
+ case SRP_OPT_TL_RETRY_COUNT:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 2 || token > 7) {
+ pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->tl_retry_count = token;
+ break;
+
+ case SRP_OPT_MAX_IT_IU_SIZE:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 0) {
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->max_it_iu_size = token;
+ break;
+
+ case SRP_OPT_CH_COUNT:
+ ret = match_int(args, &token);
+ if (ret) {
+ pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
+ p, ret);
+ goto out;
+ }
+ if (token < 1) {
+ pr_warn("bad channel count %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ target->ch_count = token;
+ break;
+
+ default:
+ pr_warn("unknown parameter or missing value '%s' in target creation request\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
+ if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret)
+ pr_warn("target creation request is missing one or more parameters\n");
+
+ if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
+ && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
+ pr_warn("cmd_per_lun = %d > queue_size = %d\n",
+ target->scsi_host->cmd_per_lun,
+ target->scsi_host->can_queue);
+
+out:
+ kfree(options);
+ return ret;
+}
+
+static ssize_t add_target_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct srp_host *host =
+ container_of(dev, struct srp_host, dev);
+ struct Scsi_Host *target_host;
+ struct srp_target_port *target;
+ struct srp_rdma_ch *ch;
+ struct srp_device *srp_dev = host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+ int ret, i, ch_idx;
+ unsigned int max_sectors_per_mr, mr_per_cmd = 0;
+ bool multich = false;
+ uint32_t max_iu_len;
+
+ target_host = scsi_host_alloc(&srp_template,
+ sizeof (struct srp_target_port));
+ if (!target_host)
+ return -ENOMEM;
+
+ target_host->transportt = ib_srp_transport_template;
+ target_host->max_channel = 0;
+ target_host->max_id = 1;
+ target_host->max_lun = -1LL;
+ target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
+ target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
+
+ if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
+ target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
+
+ target = host_to_target(target_host);
+
+ target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
+ target->io_class = SRP_REV16A_IB_IO_CLASS;
+ target->scsi_host = target_host;
+ target->srp_host = host;
+ target->lkey = host->srp_dev->pd->local_dma_lkey;
+ target->global_rkey = host->srp_dev->global_rkey;
+ target->cmd_sg_cnt = cmd_sg_entries;
+ target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
+ target->allow_ext_sg = allow_ext_sg;
+ target->tl_retry_count = 7;
+ target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
+
+ /*
+ * Prevent the SCSI host from being removed by srp_remove_target()
+ * before this function returns.
+ */
+ scsi_host_get(target->scsi_host);
+
+ ret = mutex_lock_interruptible(&host->add_target_mutex);
+ if (ret < 0)
+ goto put;
+
+ ret = srp_parse_options(target->net, buf, target);
+ if (ret)
+ goto out;
+
+ if (!srp_conn_unique(target->srp_host, target)) {
+ if (target->using_rdma_cm) {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ &target->rdma_cm.dst);
+ } else {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be64_to_cpu(target->initiator_ext));
+ }
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if (!srp_dev->has_fr && !target->allow_ext_sg &&
+ target->cmd_sg_cnt < target->sg_tablesize) {
+ pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
+ target->sg_tablesize = target->cmd_sg_cnt;
+ }
+
+ if (srp_dev->use_fast_reg) {
+ bool gaps_reg = ibdev->attrs.kernel_cap_flags &
+ IBK_SG_GAPS_REG;
+
+ max_sectors_per_mr = srp_dev->max_pages_per_mr <<
+ (ilog2(srp_dev->mr_page_size) - 9);
+ if (!gaps_reg) {
+ /*
+ * FR can only map one HCA page per entry. If the start
+ * address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail
+ * although these two entries combined contain at most
+ * one HCA page of data. Hence the "+ 1" in the
+ * calculation below.
+ *
+ * The indirect data buffer descriptor is contiguous
+ * so the memory for that buffer will only be
+ * registered if register_always is true. Hence add
+ * one to mr_per_cmd if register_always has been set.
+ */
+ mr_per_cmd = register_always +
+ (target->scsi_host->max_sectors + 1 +
+ max_sectors_per_mr - 1) / max_sectors_per_mr;
+ } else {
+ mr_per_cmd = register_always +
+ (target->sg_tablesize +
+ srp_dev->max_pages_per_mr - 1) /
+ srp_dev->max_pages_per_mr;
+ }
+ pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
+ target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
+ max_sectors_per_mr, mr_per_cmd);
+ }
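+ /*
+ * Worked example with hypothetical HCA limits: for mr_page_size = 4096,
+ * max_pages_per_mr = 512 and max_sectors = 1024, max_sectors_per_mr =
+ * 512 << (12 - 9) = 4096 sectors and, with register_always set,
+ * mr_per_cmd = 1 + (1024 + 1 + 4095) / 4096 = 2.
+ */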
+
+ target_host->sg_tablesize = target->sg_tablesize;
+ target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
+ target->mr_per_cmd = mr_per_cmd;
+ target->indirect_size = target->sg_tablesize *
+ sizeof (struct srp_direct_buf);
+ max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ srp_use_imm_data,
+ target->max_it_iu_size);
+
+ INIT_WORK(&target->tl_err_work, srp_tl_err_work);
+ INIT_WORK(&target->remove_work, srp_remove_work);
+ spin_lock_init(&target->lock);
+ ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
+ if (ret)
+ goto out;
+
+ ret = -ENOMEM;
+ if (target->ch_count == 0) {
+ target->ch_count =
+ min(ch_count ?:
+ max(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus());
+ }
+
+ target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
+ GFP_KERNEL);
+ if (!target->ch)
+ goto out;
+
+ for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
+ ch = &target->ch[ch_idx];
+ ch->target = target;
+ ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->free_tx);
+ ret = srp_new_cm_id(ch);
+ if (ret)
+ goto err_disconnect;
+
+ ret = srp_create_ch_ib(ch);
+ if (ret)
+ goto err_disconnect;
+
+ ret = srp_connect_ch(ch, max_iu_len, multich);
+ if (ret) {
+ char dst[64];
+
+ if (target->using_rdma_cm)
+ snprintf(dst, sizeof(dst), "%pIS",
+ &target->rdma_cm.dst);
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "Connection %d/%d to %s failed\n",
+ ch_idx,
+ target->ch_count, dst);
+ if (ch_idx == 0) {
+ goto free_ch;
+ } else {
+ srp_free_ch_ib(target, ch);
+ target->ch_count = ch - target->ch;
+ goto connected;
+ }
+ }
+ multich = true;
+ }
+
+connected:
+ target->scsi_host->nr_hw_queues = target->ch_count;
+
+ ret = srp_add_target(host, target);
+ if (ret)
+ goto err_disconnect;
+
+ if (target->state != SRP_TARGET_REMOVED) {
+ if (target->using_rdma_cm) {
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ target->sgid.raw, &target->rdma_cm.dst);
+ } else {
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id),
+ target->sgid.raw,
+ target->ib_cm.orig_dgid.raw);
+ }
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&host->add_target_mutex);
+
+put:
+ scsi_host_put(target->scsi_host);
+ if (ret < 0) {
+ /*
+ * If a call to srp_remove_target() has not been scheduled,
+ * drop the network namespace reference that was obtained
+ * earlier in this function.
+ */
+ if (target->state != SRP_TARGET_REMOVED)
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
+ scsi_host_put(target->scsi_host);
+ }
+
+ return ret;
+
+err_disconnect:
+ srp_disconnect_target(target);
+
+free_ch:
+ for (i = 0; i < target->ch_count; i++) {
+ ch = &target->ch[i];
+ srp_free_ch_ib(target, ch);
+ }
+
+ kfree(target->ch);
+ goto out;
+}
+
+static DEVICE_ATTR_WO(add_target);
+
+static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_host *host = container_of(dev, struct srp_host, dev);
+
+ return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
+}
+
+static DEVICE_ATTR_RO(ibdev);
+
+static ssize_t port_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct srp_host *host = container_of(dev, struct srp_host, dev);
+
+ return sysfs_emit(buf, "%u\n", host->port);
+}
+
+static DEVICE_ATTR_RO(port);
+
+static struct attribute *srp_class_attrs[] = {
+ &dev_attr_add_target.attr,
+ &dev_attr_ibdev.attr,
+ &dev_attr_port.attr,
+ NULL
+};
+
+static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
+{
+ struct srp_host *host;
+
+ host = kzalloc(sizeof *host, GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ INIT_LIST_HEAD(&host->target_list);
+ spin_lock_init(&host->target_lock);
+ mutex_init(&host->add_target_mutex);
+ host->srp_dev = device;
+ host->port = port;
+
+ device_initialize(&host->dev);
+ host->dev.class = &srp_class;
+ host->dev.parent = device->dev->dev.parent;
+ if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
+ port))
+ goto put_host;
+ if (device_add(&host->dev))
+ goto put_host;
+
+ return host;
+
+put_host:
+ device_del(&host->dev);
+ put_device(&host->dev);
+ return NULL;
+}
+
+static void srp_rename_dev(struct ib_device *device, void *client_data)
+{
+ struct srp_device *srp_dev = client_data;
+ struct srp_host *host, *tmp_host;
+
+ list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+ char name[IB_DEVICE_NAME_MAX + 8];
+
+ snprintf(name, sizeof(name), "srp-%s-%u",
+ dev_name(&device->dev), host->port);
+ device_rename(&host->dev, name);
+ }
+}
+
+static int srp_add_one(struct ib_device *device)
+{
+ struct srp_device *srp_dev;
+ struct ib_device_attr *attr = &device->attrs;
+ struct srp_host *host;
+ int mr_page_shift;
+ u32 p;
+ u64 max_pages_per_mr;
+ unsigned int flags = 0;
+
+ srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
+ if (!srp_dev)
+ return -ENOMEM;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
+ srp_dev->mr_page_size = 1 << mr_page_shift;
+ srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
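+ /*
+ * Example: for an HCA whose smallest supported page size is 4 KiB
+ * (bit 12 set in page_size_cap), ffs() returns 13, so mr_page_shift
+ * becomes max(12, 12) = 12 and mr_page_size 4096 bytes.
+ */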
+ max_pages_per_mr = attr->max_mr_size;
+ do_div(max_pages_per_mr, srp_dev->mr_page_size);
+ pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
+ attr->max_mr_size, srp_dev->mr_page_size,
+ max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
+ srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
+ max_pages_per_mr);
+
+ srp_dev->has_fr = (attr->device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ if (!never_register && !srp_dev->has_fr)
+ dev_warn(&device->dev, "FR is not supported\n");
+ else if (!never_register &&
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+ srp_dev->use_fast_reg = srp_dev->has_fr;
+
+ if (never_register || !register_always || !srp_dev->has_fr)
+ flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
+
+ if (srp_dev->use_fast_reg) {
+ srp_dev->max_pages_per_mr =
+ min_t(u32, srp_dev->max_pages_per_mr,
+ attr->max_fast_reg_page_list_len);
+ }
+ srp_dev->mr_max_size = srp_dev->mr_page_size *
+ srp_dev->max_pages_per_mr;
+ pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
+ dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
+ attr->max_fast_reg_page_list_len,
+ srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
+
+ INIT_LIST_HEAD(&srp_dev->dev_list);
+
+ srp_dev->dev = device;
+ srp_dev->pd = ib_alloc_pd(device, flags);
+ if (IS_ERR(srp_dev->pd)) {
+ int ret = PTR_ERR(srp_dev->pd);
+
+ kfree(srp_dev);
+ return ret;
+ }
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
+ WARN_ON_ONCE(srp_dev->global_rkey == 0);
+ }
+
+ rdma_for_each_port (device, p) {
+ host = srp_add_port(srp_dev, p);
+ if (host)
+ list_add_tail(&host->list, &srp_dev->dev_list);
+ }
+
+ ib_set_client_data(device, &srp_client, srp_dev);
+ return 0;
+}
+
+static void srp_remove_one(struct ib_device *device, void *client_data)
+{
+ struct srp_device *srp_dev;
+ struct srp_host *host, *tmp_host;
+ struct srp_target_port *target;
+
+ srp_dev = client_data;
+
+ list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+ /*
+ * Remove the add_target sysfs entry so that no new target ports
+ * can be created.
+ */
+ device_del(&host->dev);
+
+ /*
+ * Remove all target ports.
+ */
+ spin_lock(&host->target_lock);
+ list_for_each_entry(target, &host->target_list, list)
+ srp_queue_remove_work(target);
+ spin_unlock(&host->target_lock);
+
+ /*
+ * srp_queue_remove_work() queues a call to
+ * srp_remove_target(). The latter function cancels
+ * target->tl_err_work so waiting for the remove works to
+ * finish is sufficient.
+ */
+ flush_workqueue(srp_remove_wq);
+
+ put_device(&host->dev);
+ }
+
+ ib_dealloc_pd(srp_dev->pd);
+
+ kfree(srp_dev);
+}
+
+static struct srp_function_template ib_srp_transport_functions = {
+ .has_rport_state = true,
+ .reset_timer_if_blocked = true,
+ .reconnect_delay = &srp_reconnect_delay,
+ .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
+ .dev_loss_tmo = &srp_dev_loss_tmo,
+ .reconnect = srp_rport_reconnect,
+ .rport_delete = srp_rport_delete,
+ .terminate_rport_io = srp_terminate_io,
+};
+
+static int __init srp_init_module(void)
+{
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
+ BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
+ BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
+ BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
+ BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
+ BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
+
+ if (srp_sg_tablesize) {
+ pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
+ if (!cmd_sg_entries)
+ cmd_sg_entries = srp_sg_tablesize;
+ }
+
+ if (!cmd_sg_entries)
+ cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
+
+ if (cmd_sg_entries > 255) {
+ pr_warn("Clamping cmd_sg_entries to 255\n");
+ cmd_sg_entries = 255;
+ }
+
+ if (!indirect_sg_entries)
+ indirect_sg_entries = cmd_sg_entries;
+ else if (indirect_sg_entries < cmd_sg_entries) {
+ pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
+ cmd_sg_entries);
+ indirect_sg_entries = cmd_sg_entries;
+ }
+
+ if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+ pr_warn("Clamping indirect_sg_entries to %u\n",
+ SG_MAX_SEGMENTS);
+ indirect_sg_entries = SG_MAX_SEGMENTS;
+ }
+
+ srp_remove_wq = create_workqueue("srp_remove");
+ if (!srp_remove_wq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ ib_srp_transport_template =
+ srp_attach_transport(&ib_srp_transport_functions);
+ if (!ib_srp_transport_template)
+ goto destroy_wq;
+
+ ret = class_register(&srp_class);
+ if (ret) {
+ pr_err("couldn't register class infiniband_srp\n");
+ goto release_tr;
+ }
+
+ ib_sa_register_client(&srp_sa_client);
+
+ ret = ib_register_client(&srp_client);
+ if (ret) {
+ pr_err("couldn't register IB client\n");
+ goto unreg_sa;
+ }
+
+out:
+ return ret;
+
+unreg_sa:
+ ib_sa_unregister_client(&srp_sa_client);
+ class_unregister(&srp_class);
+
+release_tr:
+ srp_release_transport(ib_srp_transport_template);
+
+destroy_wq:
+ destroy_workqueue(srp_remove_wq);
+ goto out;
+}
+
+static void __exit srp_cleanup_module(void)
+{
+ ib_unregister_client(&srp_client);
+ ib_sa_unregister_client(&srp_sa_client);
+ class_unregister(&srp_class);
+ srp_release_transport(ib_srp_transport_template);
+ destroy_workqueue(srp_remove_wq);
+}
+
+module_init(srp_init_module);
+module_exit(srp_cleanup_module);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
new file mode 100644
index 000000000..5d94db453
--- /dev/null
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_SRP_H
+#define IB_SRP_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+#include <rdma/rdma_cm.h>
+
+enum {
+ SRP_PATH_REC_TIMEOUT_MS = 1000,
+ SRP_ABORT_TIMEOUT_MS = 5000,
+
+ SRP_PORT_REDIRECT = 1,
+ SRP_DLID_REDIRECT = 2,
+ SRP_STALE_CONN = 3,
+
+ SRP_DEF_SG_TABLESIZE = 12,
+
+ SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
+ SRP_RSP_SQ_SIZE = 1,
+ SRP_TSK_MGMT_SQ_SIZE = 1,
+ SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
+ SRP_TSK_MGMT_SQ_SIZE,
+
+ SRP_MAX_PAGES_PER_MR = 512,
+
+ SRP_MAX_ADD_CDB_LEN = 16,
+
+ SRP_MAX_IMM_SGE = 2,
+ SRP_MAX_SGE = SRP_MAX_IMM_SGE + 1,
+ /*
+ * Choose the immediate data offset such that a 32 byte CDB still fits.
+ */
+ SRP_IMM_DATA_OFFSET = sizeof(struct srp_cmd) +
+ SRP_MAX_ADD_CDB_LEN +
+ sizeof(struct srp_imm_buf),
+};
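+/*
+ * Given the wire sizes checked by the BUILD_BUG_ON() statements in
+ * ib_srp.c (sizeof(struct srp_cmd) == 48, SRP_MAX_ADD_CDB_LEN == 16 and
+ * sizeof(struct srp_imm_buf) == 4), SRP_IMM_DATA_OFFSET evaluates to
+ * 48 + 16 + 4 = 68 bytes.
+ */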
+
+enum {
+ SRP_TAG_NO_REQ = ~0U,
+ SRP_TAG_TSK_MGMT = BIT(31),
+};
+
+enum srp_target_state {
+ SRP_TARGET_SCANNING,
+ SRP_TARGET_LIVE,
+ SRP_TARGET_REMOVED,
+};
+
+enum srp_iu_type {
+ SRP_IU_CMD,
+ SRP_IU_TSK_MGMT,
+ SRP_IU_RSP,
+};
+
+/*
+ * RDMA adapter in the initiator system.
+ *
+ * @dev_list: List of RDMA ports associated with this RDMA adapter (srp_host).
+ * @mr_page_mask: HCA memory registration page mask.
+ * @mr_page_size: HCA memory registration page size.
+ * @mr_max_size: Maximum size in bytes of a single FR registration request.
+ */
+struct srp_device {
+ struct list_head dev_list;
+ struct ib_device *dev;
+ struct ib_pd *pd;
+ u32 global_rkey;
+ u64 mr_page_mask;
+ int mr_page_size;
+ int mr_max_size;
+ int max_pages_per_mr;
+ bool has_fr;
+ bool use_fast_reg;
+};
+
+/*
+ * One port of an RDMA adapter in the initiator system.
+ *
+ * @target_list: List of connected target ports (struct srp_target_port).
+ * @target_lock: Protects @target_list.
+ */
+struct srp_host {
+ struct srp_device *srp_dev;
+ u32 port;
+ struct device dev;
+ struct list_head target_list;
+ spinlock_t target_lock;
+ struct list_head list;
+ struct mutex add_target_mutex;
+};
+
+struct srp_request {
+ struct scsi_cmnd *scmnd;
+ struct srp_iu *cmd;
+ struct srp_fr_desc **fr_list;
+ struct srp_direct_buf *indirect_desc;
+ dma_addr_t indirect_dma_addr;
+ short nmdesc;
+ struct ib_cqe reg_cqe;
+};
+
+/**
+ * struct srp_rdma_ch
+ * @comp_vector: Completion vector used by this RDMA channel.
+ * @max_it_iu_len: Maximum initiator-to-target information unit length.
+ * @max_ti_iu_len: Maximum target-to-initiator information unit length.
+ */
+struct srp_rdma_ch {
+ /* These are RW in the hot path, and commonly used together */
+ struct list_head free_tx;
+ spinlock_t lock;
+ s32 req_lim;
+
+ /* These are read-only in the hot path */
+ struct srp_target_port *target ____cacheline_aligned_in_smp;
+ struct ib_cq *send_cq;
+ struct ib_cq *recv_cq;
+ struct ib_qp *qp;
+ struct srp_fr_pool *fr_pool;
+ uint32_t max_it_iu_len;
+ uint32_t max_ti_iu_len;
+ u8 max_imm_sge;
+ bool use_imm_data;
+
+ /* Everything above this point is used in the hot path of
+ * command processing. Try to keep them packed into cachelines.
+ */
+
+ struct completion done;
+ int status;
+
+ union {
+ struct ib_cm {
+ struct sa_path_rec path;
+ struct ib_sa_query *path_query;
+ int path_query_id;
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ struct rdma_cm {
+ struct rdma_cm_id *cm_id;
+ } rdma_cm;
+ };
+
+ struct srp_iu **tx_ring;
+ struct srp_iu **rx_ring;
+ int comp_vector;
+
+ u64 tsk_mgmt_tag;
+ struct completion tsk_mgmt_done;
+ u8 tsk_mgmt_status;
+ bool connected;
+};
+
+/**
+ * struct srp_target_port - RDMA port in the SRP target system
+ * @comp_vector: Completion vector used by the first RDMA channel created for
+ * this target port.
+ */
+struct srp_target_port {
+ /* read and written in the hot path */
+ spinlock_t lock;
+
+ /* read only in the hot path */
+ u32 global_rkey;
+ struct srp_rdma_ch *ch;
+ struct net *net;
+ u32 ch_count;
+ u32 lkey;
+ enum srp_target_state state;
+ uint32_t max_it_iu_size;
+ unsigned int cmd_sg_cnt;
+ unsigned int indirect_size;
+ bool allow_ext_sg;
+
+ /* other member variables */
+ union ib_gid sgid;
+ __be64 id_ext;
+ __be64 ioc_guid;
+ __be64 initiator_ext;
+ u16 io_class;
+ struct srp_host *srp_host;
+ struct Scsi_Host *scsi_host;
+ struct srp_rport *rport;
+ char target_name[32];
+ unsigned int scsi_id;
+ unsigned int sg_tablesize;
+ unsigned int target_can_queue;
+ int mr_pool_size;
+ int mr_per_cmd;
+ int queue_size;
+ int comp_vector;
+ int tl_retry_count;
+
+ bool using_rdma_cm;
+
+ union {
+ struct {
+ __be64 service_id;
+ union ib_gid orig_dgid;
+ __be16 pkey;
+ } ib_cm;
+ struct {
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr sa;
+ struct sockaddr_storage ss;
+ } src;
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr sa;
+ struct sockaddr_storage ss;
+ } dst;
+ bool src_specified;
+ } rdma_cm;
+ };
+
+ u32 rq_tmo_jiffies;
+
+ int zero_req_lim;
+
+ struct work_struct tl_err_work;
+ struct work_struct remove_work;
+
+ struct list_head list;
+ bool qp_in_error;
+};
+
+struct srp_iu {
+ struct list_head list;
+ u64 dma;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+ u32 num_sge;
+ struct ib_sge sge[SRP_MAX_SGE];
+ struct ib_cqe cqe;
+};
+
+/**
+ * struct srp_fr_desc - fast registration work request arguments
+ * @entry: Entry in srp_fr_pool.free_list.
+ * @mr: Memory region.
+ */
+struct srp_fr_desc {
+ struct list_head entry;
+ struct ib_mr *mr;
+};
+
+/**
+ * struct srp_fr_pool - pool of fast registration descriptors
+ *
+ * An entry is available for allocation if and only if it occurs in @free_list.
+ *
+ * @size: Number of descriptors in this pool.
+ * @max_page_list_len: Maximum fast registration work request page list length.
+ * @lock: Protects free_list.
+ * @free_list: List of free descriptors.
+ * @desc: Fast registration descriptor pool.
+ */
+struct srp_fr_pool {
+ int size;
+ int max_page_list_len;
+ spinlock_t lock;
+ struct list_head free_list;
+ struct srp_fr_desc desc[];
+};
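+/*
+ * Minimal sketch of taking a descriptor out of the pool, assuming the
+ * free-list/spinlock pattern documented above (the actual pool helpers
+ * live in ib_srp.c):
+ *
+ * spin_lock_irqsave(&pool->lock, flags);
+ * desc = list_first_entry_or_null(&pool->free_list,
+ * struct srp_fr_desc, entry);
+ * if (desc)
+ * list_del(&desc->entry);
+ * spin_unlock_irqrestore(&pool->lock, flags);
+ */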
+
+/**
+ * struct srp_map_state - per-request DMA memory mapping state
+ * @desc: Pointer to the element of the SRP buffer descriptor array
+ * that is being filled in.
+ * @pages: Array with DMA addresses of pages being considered for
+ * memory registration.
+ * @base_dma_addr: DMA address of the first page that has not yet been mapped.
+ * @dma_len: Number of bytes that will be registered with the next FR
+ * memory registration call.
+ * @total_len: Total number of bytes in the sg-list being mapped.
+ * @npages: Number of page addresses in the pages[] array.
+ * @nmdesc: Number of FR memory descriptors used for mapping.
+ * @ndesc: Number of SRP buffer descriptors that have been filled in.
+ */
+struct srp_map_state {
+ union {
+ struct {
+ struct srp_fr_desc **next;
+ struct srp_fr_desc **end;
+ } fr;
+ struct {
+ void **next;
+ void **end;
+ } gen;
+ };
+ struct srp_direct_buf *desc;
+ union {
+ u64 *pages;
+ struct scatterlist *sg;
+ };
+ dma_addr_t base_dma_addr;
+ u32 dma_len;
+ u32 total_len;
+ unsigned int npages;
+ unsigned int nmdesc;
+ unsigned int ndesc;
+};
+
+#endif /* IB_SRP_H */
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 000000000..4b5d9b792
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_SRPT
+ tristate "InfiniBand SCSI RDMA Protocol target support"
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
+ help
+ Support for the SCSI RDMA Protocol (SRP) target driver. SRP
+ allows an initiator to access a block storage device on another
+ host (the target) over a network that supports RDMA. Currently
+ RDMA is supported by InfiniBand and by iWARP network hardware.
+ More information about the SRP protocol can be found on the
+ website of the INCITS T10 technical committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 000000000..2d137928a
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 000000000..fb1de1f6f
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_DM_MAD_H
+#define IB_DM_MAD_H
+
+#include <linux/types.h>
+
+#include <rdma/ib_mad.h>
+
+enum {
+ /*
+ * See also section 13.4.7 Status Field, table 115 MAD Common Status
+ * Field Bit Values and also section 16.3.1.1 Status Field in the
+ * InfiniBand Architecture Specification.
+ */
+ DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
+ DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
+ DM_MAD_STATUS_INVALID_FIELD = 0x001c,
+ DM_MAD_STATUS_NO_IOC = 0x0100,
+
+ /*
+ * See also the Device Management chapter, section 16.3.3 Attributes,
+ * table 279 Device Management Attributes in the InfiniBand
+ * Architecture Specification.
+ */
+ DM_ATTR_CLASS_PORT_INFO = 0x01,
+ DM_ATTR_IOU_INFO = 0x10,
+ DM_ATTR_IOC_PROFILE = 0x11,
+ DM_ATTR_SVC_ENTRIES = 0x12
+};
+
+struct ib_dm_hdr {
+ u8 reserved[28];
+};
+
+/*
+ * Structure of management datagram sent by the SRP target implementation.
+ * Contains a management datagram header, reliable multi-packet transaction
+ * protocol (RMPP) header and ib_dm_hdr. Notes:
+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
+ * management datagrams.
+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
+ * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
+ * - The maximum supported size for a management datagram when not using RMPP
+ * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
+ */
+struct ib_dm_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ struct ib_dm_hdr dm_hdr;
+ u8 data[IB_MGMT_DEVICE_DATA];
+};
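+/*
+ * Size check: the 64-byte (IB_MGMT_DEVICE_HDR) header mentioned above is
+ * the sum of the three headers in struct ib_dm_mad: 24 (mad_hdr) +
+ * 12 (rmpp_hdr) + 28 (dm_hdr) bytes.
+ */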
+
+/*
+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
+ * Architecture Specification.
+ */
+struct ib_dm_iou_info {
+ __be16 change_id;
+ u8 max_controllers;
+ u8 op_rom;
+ u8 controller_list[128];
+};
+
+/*
+ * IOControllerProfile as defined in section 16.3.3.4 IOControllerProfile of
+ * the InfiniBand Architecture Specification.
+ */
+struct ib_dm_ioc_profile {
+ __be64 guid;
+ __be32 vendor_id;
+ __be32 device_id;
+ __be16 device_version;
+ __be16 reserved1;
+ __be32 subsys_vendor_id;
+ __be32 subsys_device_id;
+ __be16 io_class;
+ __be16 io_subclass;
+ __be16 protocol;
+ __be16 protocol_version;
+ __be16 service_conn;
+ __be16 initiators_supported;
+ __be16 send_queue_depth;
+ u8 reserved2;
+ u8 rdma_read_depth;
+ __be32 send_size;
+ __be32 rdma_size;
+ u8 op_cap_mask;
+ u8 svc_cap_mask;
+ u8 num_svc_entries;
+ u8 reserved3[9];
+ u8 id_string[64];
+};
+
+struct ib_dm_svc_entry {
+ u8 name[40];
+ __be64 id;
+};
+
+/*
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
+ */
+struct ib_dm_svc_entries {
+ struct ib_dm_svc_entry service_entries[4];
+};
+
+#endif
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 000000000..25e799dba
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,3958 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/inet.h>
+#include <rdma/ib_cache.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "ib_srpt.h"
+
+/* Name of this kernel module. */
+#define DRV_NAME "ib_srpt"
+
+#define SRPT_ID_STRING "Linux SRP target"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME " " fmt
+
+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
+MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Global Variables
+ */
+
+static u64 srpt_service_guid;
+static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
+static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
+
+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+module_param(srp_max_req_size, int, 0444);
+MODULE_PARM_DESC(srp_max_req_size,
+ "Maximum size of SRP request messages in bytes.");
+
+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
+module_param(srpt_srq_size, int, 0444);
+MODULE_PARM_DESC(srpt_srq_size,
+ "Shared receive queue (SRQ) size.");
+
+static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
+{
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
+}
+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+ 0444);
+MODULE_PARM_DESC(srpt_service_guid,
+ "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
+
+static struct ib_client srpt_client;
+/* Protects both rdma_cm_port and rdma_cm_id. */
+static DEFINE_MUTEX(rdma_cm_mutex);
+/* Port number RDMA/CM will bind to. */
+static u16 rdma_cm_port;
+static struct rdma_cm_id *rdma_cm_id;
+static void srpt_release_cmd(struct se_cmd *se_cmd);
+static void srpt_free_ch(struct kref *kref);
+static int srpt_queue_status(struct se_cmd *cmd);
+static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
+
+/*
+ * The only allowed channel state changes are those that change the channel
+ * state into a state with a higher numerical value. Hence the new > prev test.
+ */
+static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+ bool changed = false;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ if (new > prev) {
+ ch->state = new;
+ changed = true;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ return changed;
+}
+
+/**
+ * srpt_event_handler - asynchronous IB event callback function
+ * @handler: IB event handler registered by ib_register_event_handler().
+ * @event: Description of the event that occurred.
+ *
+ * Callback function called by the InfiniBand core when an asynchronous IB
+ * event occurs. This callback may occur in interrupt context. See also
+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
+ * Architecture Specification.
+ */
+static void srpt_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ struct srpt_device *sdev =
+ container_of(handler, struct srpt_device, event_handler);
+ struct srpt_port *sport;
+ u8 port_num;
+
+ pr_debug("ASYNC event= %d on device= %s\n", event->event,
+ dev_name(&sdev->device->dev));
+
+ switch (event->event) {
+ case IB_EVENT_PORT_ERR:
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
+ sport->lid = 0;
+ sport->sm_lid = 0;
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
+ }
+ break;
+ case IB_EVENT_PORT_ACTIVE:
+ case IB_EVENT_LID_CHANGE:
+ case IB_EVENT_PKEY_CHANGE:
+ case IB_EVENT_SM_CHANGE:
+ case IB_EVENT_CLIENT_REREGISTER:
+ case IB_EVENT_GID_CHANGE:
+ /* Refresh port data asynchronously. */
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
+ if (!sport->lid && !sport->sm_lid)
+ schedule_work(&sport->work);
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
+ }
+ break;
+ default:
+ pr_err("received unrecognized IB event %d\n", event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_srq_event - SRQ event callback function
+ * @event: Description of the event that occurred.
+ * @ctx: Context pointer specified at SRQ creation time.
+ */
+static void srpt_srq_event(struct ib_event *event, void *ctx)
+{
+ pr_debug("SRQ event %d\n", event->event);
+}
+
+static const char *get_ch_state_name(enum rdma_ch_state s)
+{
+ switch (s) {
+ case CH_CONNECTING:
+ return "connecting";
+ case CH_LIVE:
+ return "live";
+ case CH_DISCONNECTING:
+ return "disconnecting";
+ case CH_DRAINING:
+ return "draining";
+ case CH_DISCONNECTED:
+ return "disconnected";
+ }
+ return "???";
+}
+
+/**
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+ * @ch: SRPT RDMA channel.
+ */
+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+{
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ if (ch->using_rdma_cm)
+ rdma_notify(ch->rdma_cm.cm_id, event->event);
+ else
+ ib_cm_notify(ch->ib_cm.cm_id, event->event);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ pr_debug("%s-%d, state %s: received Last WQE event.\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+ break;
+ default:
+ pr_err("received unrecognized IB QP event %d\n", event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_set_ioc - initialize an IOUnitInfo structure
+ * @c_list: controller list.
+ * @slot: one-based slot number.
+ * @value: four-bit value.
+ *
+ * Copies the lowest four bits of @value into element @slot of the array of
+ * four-bit elements @c_list (the controller list). The slot index is one-based.
+ */
+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
+{
+ u16 id;
+ u8 tmp;
+
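+ /*
+ * Two 4-bit entries are packed per byte: odd slots occupy the high
+ * nibble, even slots the low nibble.
+ */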
+ id = (slot - 1) / 2;
+ if (slot & 0x1) {
+ tmp = c_list[id] & 0xf;
+ c_list[id] = (value << 4) | tmp;
+ } else {
+ tmp = c_list[id] & 0xf0;
+ c_list[id] = (value & 0xf) | tmp;
+ }
+}
+
+/**
+ * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
+ *
+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
+ * Specification.
+ */
+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
+{
+ struct ib_class_port_info *cif;
+
+ cif = (struct ib_class_port_info *)mad->data;
+ memset(cif, 0, sizeof(*cif));
+ cif->base_version = 1;
+ cif->class_version = 1;
+
+ ib_set_cpi_resp_time(cif, 20);
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_iou - write IOUnitInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
+ *
+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
+ */
+static void srpt_get_iou(struct ib_dm_mad *mad)
+{
+ struct ib_dm_iou_info *ioui;
+ u8 slot;
+ int i;
+
+ ioui = (struct ib_dm_iou_info *)mad->data;
+ ioui->change_id = cpu_to_be16(1);
+ ioui->max_controllers = 16;
+
+ /* set present for slot 1 and empty for the rest */
+ srpt_set_ioc(ioui->controller_list, 1, 1);
+ for (i = 1, slot = 2; i < 16; i++, slot++)
+ srpt_set_ioc(ioui->controller_list, slot, 0);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_ioc - write IOControllerProfile to a management datagram
+ * @sport: HCA port through which the MAD has been received.
+ * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
+ *
+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
+ * Architecture Specification. See also section B.7, table B.7 in the SRP
+ * r16a document.
+ */
+static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
+ struct ib_dm_mad *mad)
+{
+ struct srpt_device *sdev = sport->sdev;
+ struct ib_dm_ioc_profile *iocp;
+ int send_queue_depth;
+
+ iocp = (struct ib_dm_ioc_profile *)mad->data;
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2) {
+ mad->mad_hdr.status
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ if (sdev->use_srq)
+ send_queue_depth = sdev->srq_size;
+ else
+ send_queue_depth = min(MAX_SRPT_RQ_SIZE,
+ sdev->device->attrs.max_qp_wr);
+
+ memset(iocp, 0, sizeof(*iocp));
+ strcpy(iocp->id_string, SRPT_ID_STRING);
+ iocp->guid = cpu_to_be64(srpt_service_guid);
+ iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
+ iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
+ iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
+ iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
+ iocp->subsys_device_id = 0x0;
+ iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
+ iocp->rdma_read_depth = 4;
+ iocp->send_size = cpu_to_be32(srp_max_req_size);
+ iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
+ 1U << 24));
+ iocp->num_svc_entries = 1;
+ iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
+ SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_svc_entries - write ServiceEntries to a management datagram
+ * @ioc_guid: I/O controller GUID to use in reply.
+ * @slot: I/O controller number.
+ * @hi: End of the range of service entries to be specified in the reply.
+ * @lo: Start of the range of service entries to be specified in the reply.
+ * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
+ *
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
+ */
+static void srpt_get_svc_entries(u64 ioc_guid,
+ u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
+{
+ struct ib_dm_svc_entries *svc_entries;
+
+ WARN_ON(!ioc_guid);
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2 || lo > hi || hi > 1) {
+ mad->mad_hdr.status
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ svc_entries = (struct ib_dm_svc_entries *)mad->data;
+ memset(svc_entries, 0, sizeof(*svc_entries));
+ svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
+ snprintf(svc_entries->service_entries[0].name,
+ sizeof(svc_entries->service_entries[0].name),
+ "%s%016llx",
+ SRP_SERVICE_NAME_PREFIX,
+ ioc_guid);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_mgmt_method_get - process a received management datagram
+ * @sp: HCA port through which the MAD has been received.
+ * @rq_mad: received MAD.
+ * @rsp_mad: response MAD.
+ */
+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
+ struct ib_dm_mad *rsp_mad)
+{
+ u16 attr_id;
+ u32 slot;
+ u8 hi, lo;
+
+ attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
+ switch (attr_id) {
+ case DM_ATTR_CLASS_PORT_INFO:
+ srpt_get_class_port_info(rsp_mad);
+ break;
+ case DM_ATTR_IOU_INFO:
+ srpt_get_iou(rsp_mad);
+ break;
+ case DM_ATTR_IOC_PROFILE:
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ srpt_get_ioc(sp, slot, rsp_mad);
+ break;
+ case DM_ATTR_SVC_ENTRIES:
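+ /* attr_mod: bits 31..16 = IOC slot, bits 15..8 = hi, bits 7..0 = lo. */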
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ hi = (u8) ((slot >> 8) & 0xff);
+ lo = (u8) (slot & 0xff);
+ slot = (u16) ((slot >> 16) & 0xffff);
+ srpt_get_svc_entries(srpt_service_guid,
+ slot, hi, lo, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status =
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ }
+}
+
+/**
+ * srpt_mad_send_handler - MAD send completion callback
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @mad_wc: Work completion reporting that the MAD has been sent.
+ */
+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_wc)
+{
+ rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
+ ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * srpt_mad_recv_handler - MAD reception callback function
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @send_buf: Not used.
+ * @mad_wc: Work completion reporting that a MAD has been received.
+ */
+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_buf *send_buf,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
+ struct ib_ah *ah;
+ struct ib_mad_send_buf *rsp;
+ struct ib_dm_mad *dm_mad;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad)
+ return;
+
+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+ mad_wc->recv_buf.grh, mad_agent->port_num);
+ if (IS_ERR(ah))
+ goto err;
+
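+ /* The DM MAD payload must start right after the device management header. */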
+ BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
+
+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+ mad_wc->wc->pkey_index, 0,
+ IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
+ GFP_KERNEL,
+ IB_MGMT_BASE_VERSION);
+ if (IS_ERR(rsp))
+ goto err_rsp;
+
+ rsp->ah = ah;
+
+ dm_mad = rsp->mad;
+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
+ dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+ dm_mad->mad_hdr.status = 0;
+
+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
+ break;
+ case IB_MGMT_METHOD_SET:
+ dm_mad->mad_hdr.status =
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ default:
+ dm_mad->mad_hdr.status =
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ break;
+ }
+
+ if (!ib_post_send_mad(rsp, NULL)) {
+ ib_free_recv_mad(mad_wc);
+ /* srpt_mad_send_handler() will destroy the AH and free the send MAD. */
+ return;
+ }
+
+ ib_free_send_mad(rsp);
+
+err_rsp:
+ rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
+err:
+ ib_free_recv_mad(mad_wc);
+}
+
+static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+{
+ const __be16 *g = (const __be16 *)guid;
+
+ return snprintf(buf, size, "%04x:%04x:%04x:%04x",
+ be16_to_cpu(g[0]), be16_to_cpu(g[1]),
+ be16_to_cpu(g[2]), be16_to_cpu(g[3]));
+}
+
+/**
+ * srpt_refresh_port - configure a HCA port
+ * @sport: SRPT HCA port.
+ *
+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
+ * lid and gid values, and register a callback function for processing MADs
+ * on the specified port.
+ *
+ * Note: It is safe to call this function more than once for the same port.
+ */
+static int srpt_refresh_port(struct srpt_port *sport)
+{
+ struct ib_mad_agent *mad_agent;
+ struct ib_mad_reg_req reg_req;
+ struct ib_port_modify port_modify;
+ struct ib_port_attr port_attr;
+ int ret;
+
+ ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
+ if (ret)
+ return ret;
+
+ sport->sm_lid = port_attr.sm_lid;
+ sport->lid = port_attr.lid;
+
+ ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
+ if (ret)
+ return ret;
+
+ srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
+ &sport->gid.global.interface_id);
+ snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+
+ if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+ return 0;
+
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret) {
+ pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port, ret);
+ return 0;
+ }
+
+ if (!sport->mad_agent) {
+ memset(&reg_req, 0, sizeof(reg_req));
+ reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
+ reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+ mad_agent = ib_register_mad_agent(sport->sdev->device,
+ sport->port,
+ IB_QPT_GSI,
+ &reg_req, 0,
+ srpt_mad_send_handler,
+ srpt_mad_recv_handler,
+ sport, 0);
+ if (IS_ERR(mad_agent)) {
+ pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ PTR_ERR(mad_agent));
+ sport->mad_agent = NULL;
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0,
+ &port_modify);
+ return 0;
+ }
+
+ sport->mad_agent = mad_agent;
+ }
+
+ return 0;
+}
+
+/**
+ * srpt_unregister_mad_agent - unregister MAD callback functions
+ * @sdev: SRPT HCA pointer.
+ * @port_cnt: Number of ports for which MAD agents have been registered.
+ *
+ * Note: It is safe to call this function more than once for the same device.
+ */
+static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
+{
+ struct ib_port_modify port_modify = {
+ .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+ };
+ struct srpt_port *sport;
+ int i;
+
+ for (i = 1; i <= port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ WARN_ON(sport->port != i);
+ if (sport->mad_agent) {
+ ib_modify_port(sdev->device, i, 0, &port_modify);
+ ib_unregister_mad_agent(sport->mad_agent);
+ sport->mad_agent = NULL;
+ }
+ }
+}
+
+/**
+ * srpt_alloc_ioctx - allocate a SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx_size: I/O context size.
+ * @buf_cache: I/O buffer cache.
+ * @dir: DMA data direction.
+ */
+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
+ int ioctx_size,
+ struct kmem_cache *buf_cache,
+ enum dma_data_direction dir)
+{
+ struct srpt_ioctx *ioctx;
+
+ ioctx = kzalloc(ioctx_size, GFP_KERNEL);
+ if (!ioctx)
+ goto err;
+
+ ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
+ if (!ioctx->buf)
+ goto err_free_ioctx;
+
+ ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
+ kmem_cache_size(buf_cache), dir);
+ if (ib_dma_mapping_error(sdev->device, ioctx->dma))
+ goto err_free_buf;
+
+ return ioctx;
+
+err_free_buf:
+ kmem_cache_free(buf_cache, ioctx->buf);
+err_free_ioctx:
+ kfree(ioctx);
+err:
+ return NULL;
+}
+
+/**
+ * srpt_free_ioctx - free a SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx: I/O context pointer.
+ * @buf_cache: I/O buffer cache.
+ * @dir: DMA data direction.
+ */
+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
+ struct kmem_cache *buf_cache,
+ enum dma_data_direction dir)
+{
+ if (!ioctx)
+ return;
+
+ ib_dma_unmap_single(sdev->device, ioctx->dma,
+ kmem_cache_size(buf_cache), dir);
+ kmem_cache_free(buf_cache, ioctx->buf);
+ kfree(ioctx);
+}
+
+/**
+ * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
+ * @sdev: Device to allocate the I/O context ring for.
+ * @ring_size: Number of elements in the I/O context ring.
+ * @ioctx_size: I/O context size.
+ * @buf_cache: I/O buffer cache.
+ * @alignment_offset: Offset in each ring buffer at which the SRP information
+ * unit starts.
+ * @dir: DMA data direction.
+ */
+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
+ int ring_size, int ioctx_size,
+ struct kmem_cache *buf_cache,
+ int alignment_offset,
+ enum dma_data_direction dir)
+{
+ struct srpt_ioctx **ring;
+ int i;
+
+ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
+ ioctx_size != sizeof(struct srpt_send_ioctx));
+
+ ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
+ if (!ring)
+ goto out;
+ for (i = 0; i < ring_size; ++i) {
+ ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
+ if (!ring[i])
+ goto err;
+ ring[i]->index = i;
+ ring[i]->offset = alignment_offset;
+ }
+ goto out;
+
+err:
+ while (--i >= 0)
+ srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
+ kvfree(ring);
+ ring = NULL;
+out:
+ return ring;
+}
+
+/**
+ * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
+ * @ioctx_ring: I/O context ring to be freed.
+ * @sdev: SRPT HCA pointer.
+ * @ring_size: Number of ring elements.
+ * @buf_cache: I/O buffer cache.
+ * @dir: DMA data direction.
+ */
+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
+ struct srpt_device *sdev, int ring_size,
+ struct kmem_cache *buf_cache,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ if (!ioctx_ring)
+ return;
+
+ for (i = 0; i < ring_size; ++i)
+ srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
+ kvfree(ioctx_ring);
+}
+
+/**
+ * srpt_set_cmd_state - set the state of a SCSI command
+ * @ioctx: Send I/O context.
+ * @new: New I/O context state.
+ *
+ * Does not modify the state of aborted commands. Returns the previous command
+ * state.
+ */
+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+
+ previous = ioctx->state;
+ if (previous != SRPT_STATE_DONE)
+ ioctx->state = new;
+
+ return previous;
+}
+
+/**
+ * srpt_test_and_set_cmd_state - test and set the state of a command
+ * @ioctx: Send I/O context.
+ * @old: Current I/O context state.
+ * @new: New I/O context state.
+ *
+ * Returns true if and only if the previous command state was equal to 'old'.
+ */
+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state old,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+
+ WARN_ON(!ioctx);
+ WARN_ON(old == SRPT_STATE_DONE);
+ WARN_ON(new == SRPT_STATE_NEW);
+
+ previous = ioctx->state;
+ if (previous == old)
+ ioctx->state = new;
+
+ return previous == old;
+}
+
+/**
+ * srpt_post_recv - post an IB receive request
+ * @sdev: SRPT HCA pointer.
+ * @ch: SRPT RDMA channel.
+ * @ioctx: Receive I/O context pointer.
+ */
+static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *ioctx)
+{
+ struct ib_sge list;
+ struct ib_recv_wr wr;
+
+ BUG_ON(!sdev);
+ list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
+ list.length = srp_max_req_size;
+ list.lkey = sdev->lkey;
+
+ ioctx->ioctx.cqe.done = srpt_recv_done;
+ wr.wr_cqe = &ioctx->ioctx.cqe;
+ wr.next = NULL;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
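+ /* With an SRQ all channels share one receive queue; otherwise post to this channel's QP. */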
+ if (sdev->use_srq)
+ return ib_post_srq_recv(sdev->srq, &wr, NULL);
+ else
+ return ib_post_recv(ch->qp, &wr, NULL);
+}
+
+/**
+ * srpt_zerolength_write - perform a zero-length RDMA write
+ * @ch: SRPT RDMA channel.
+ *
+ * A quote from the InfiniBand specification: C9-88: For an HCA responder
+ * using Reliable Connection service, for each zero-length RDMA READ or WRITE
+ * request, the R_Key shall not be validated, even if the request includes
+ * Immediate data.
+ */
+static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
+{
+ struct ib_rdma_wr wr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &ch->zw_cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ .send_flags = IB_SEND_SIGNALED,
+ }
+ };
+
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+
+ return ib_post_send(ch->qp, &wr.wr, NULL);
+}
+
+static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srpt_rdma_ch *ch = wc->qp->qp_context;
+
+ pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
+ wc->status);
+
+ if (wc->status == IB_WC_SUCCESS) {
+ srpt_process_wait_list(ch);
+ } else {
+ if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+ schedule_work(&ch->release_work);
+ else
+ pr_debug("%s-%d: already disconnected.\n",
+ ch->sess_name, ch->qp->qp_num);
+ }
+}
+
+static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
+ struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
+ unsigned *sg_cnt)
+{
+ enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct scatterlist *prev = NULL;
+ unsigned prev_nents;
+ int ret, i;
+
+ if (nbufs == 1) {
+ ioctx->rw_ctxs = &ioctx->s_rw_ctx;
+ } else {
+ ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
+ GFP_KERNEL);
+ if (!ioctx->rw_ctxs)
+ return -ENOMEM;
+ }
+
+ for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+ u64 remote_addr = be64_to_cpu(db->va);
+ u32 size = be32_to_cpu(db->len);
+ u32 rkey = be32_to_cpu(db->key);
+
+ ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
+ i < nbufs - 1);
+ if (ret)
+ goto unwind;
+
+ ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
+ if (ret < 0) {
+ target_free_sgl(ctx->sg, ctx->nents);
+ goto unwind;
+ }
+
+ ioctx->n_rdma += ret;
+ ioctx->n_rw_ctx++;
+
+ if (prev) {
+ sg_unmark_end(&prev[prev_nents - 1]);
+ sg_chain(prev, prev_nents + 1, ctx->sg);
+ } else {
+ *sg = ctx->sg;
+ }
+
+ prev = ctx->sg;
+ prev_nents = ctx->nents;
+
+ *sg_cnt += ctx->nents;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, dir);
+ target_free_sgl(ctx->sg, ctx->nents);
+ }
+ if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
+ kfree(ioctx->rw_ctxs);
+ return ret;
+}
+
+static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
+ int i;
+
+ for (i = 0; i < ioctx->n_rw_ctx; i++) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, dir);
+ target_free_sgl(ctx->sg, ctx->nents);
+ }
+
+ if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
+ kfree(ioctx->rw_ctxs);
+}
+
+static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
+{
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
+ !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ /*
+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+ * CDB LENGTH' field are reserved and the size in bytes of this field
+ * is four times the value specified in bits 3..7. Hence the "& ~3".
+ */
+ return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
+}
+
+/**
+ * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
+ * @recv_ioctx: I/O context associated with the received command @srp_cmd.
+ * @ioctx: I/O context that will be used for responding to the initiator.
+ * @srp_cmd: Pointer to the SRP_CMD request data.
+ * @dir: Pointer to the variable to which the transfer direction will be
+ * written.
+ * @sg: [out] scatterlist for the parsed SRP_CMD.
+ * @sg_cnt: [out] length of @sg.
+ * @data_len: Pointer to the variable to which the total data length of all
+ * descriptors in the SRP_CMD request will be written.
+ * @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
+ * starts.
+ *
+ * Upon success *@sg and *@sg_cnt describe the data buffers of the request, if any.
+ *
+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
+ * -ENOMEM when memory allocation fails and zero upon success.
+ */
+static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *ioctx,
+ struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
+ u16 imm_data_offset)
+{
+ BUG_ON(!dir);
+ BUG_ON(!data_len);
+
+ /*
+ * The lower four bits of the buffer format field contain the DATA-IN
+ * buffer descriptor format, and the highest four bits contain the
+ * DATA-OUT buffer descriptor format.
+ */
+ if (srp_cmd->buf_fmt & 0xf)
+ /* DATA-IN: transfer data from target to initiator (read). */
+ *dir = DMA_FROM_DEVICE;
+ else if (srp_cmd->buf_fmt >> 4)
+ /* DATA-OUT: transfer data from initiator to target (write). */
+ *dir = DMA_TO_DEVICE;
+ else
+ *dir = DMA_NONE;
+
+ /* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
+ ioctx->cmd.data_direction = *dir;
+
+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+ struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
+
+ *data_len = be32_to_cpu(db->len);
+ return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+ struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
+ int nbufs = be32_to_cpu(idb->table_desc.len) /
+ sizeof(struct srp_direct_buf);
+
+ if (nbufs >
+ (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
+ pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
+ srp_cmd->data_out_desc_cnt,
+ srp_cmd->data_in_desc_cnt,
+ be32_to_cpu(idb->table_desc.len),
+ sizeof(struct srp_direct_buf));
+ return -EINVAL;
+ }
+
+ *data_len = be32_to_cpu(idb->len);
+ return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
+ sg, sg_cnt);
+ } else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
+ struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
+ void *data = (void *)srp_cmd + imm_data_offset;
+ uint32_t len = be32_to_cpu(imm_buf->len);
+ uint32_t req_size = imm_data_offset + len;
+
+ if (req_size > srp_max_req_size) {
+ pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
+ imm_data_offset, len, srp_max_req_size);
+ return -EINVAL;
+ }
+ if (recv_ioctx->byte_len < req_size) {
+ pr_err("Received too few data - %d < %d\n",
+ recv_ioctx->byte_len, req_size);
+ return -EIO;
+ }
+ /*
+ * The immediate data buffer descriptor must occur before the
+ * immediate data itself.
+ */
+ if ((void *)(imm_buf + 1) > (void *)data) {
+ pr_err("Received invalid write request\n");
+ return -EINVAL;
+ }
+ *data_len = len;
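+ /* Keep the receive ioctx: immediate data is used in place from the receive buffer. */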
+ ioctx->recv_ioctx = recv_ioctx;
+ if ((uintptr_t)data & 511) {
+ pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
+ return -EINVAL;
+ }
+ sg_init_one(&ioctx->imm_sg, data, len);
+ *sg = &ioctx->imm_sg;
+ *sg_cnt = 1;
+ return 0;
+ } else {
+ *data_len = 0;
+ return 0;
+ }
+}
+
+/**
+ * srpt_init_ch_qp - initialize queue pair attributes
+ * @ch: SRPT RDMA channel.
+ * @qp: Queue pair pointer.
+ *
+ * Initializes the attributes of queue pair 'qp' by allowing local write,
+ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
+ */
+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr *attr;
+ int ret;
+
+ WARN_ON_ONCE(ch->using_rdma_cm);
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->qp_state = IB_QPS_INIT;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ attr->port_num = ch->sport->port;
+
+ ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
+ ch->pkey, &attr->pkey_index);
+ if (ret < 0)
+ pr_err("Translating pkey %#x failed (%d) - using index 0\n",
+ ch->pkey, ret);
+
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
+ IB_QP_PKEY_INDEX);
+
+ kfree(attr);
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ WARN_ON_ONCE(ch->using_rdma_cm);
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_dest_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_err - set the channel queue pair state to 'error'
+ * @ch: SRPT RDMA channel.
+ */
+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_attr qp_attr;
+
+ qp_attr.qp_state = IB_QPS_ERR;
+ return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
+}
+
+/**
+ * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
+ * @ch: SRPT RDMA channel.
+ */
+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
+{
+ struct srpt_send_ioctx *ioctx;
+ int tag, cpu;
+
+ BUG_ON(!ch);
+
+ tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ return NULL;
+
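+ /* A tag indexes ioctx_ring directly, so each tag maps to exactly one send ioctx. */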
+ ioctx = ch->ioctx_ring[tag];
+ BUG_ON(ioctx->ch != ch);
+ ioctx->state = SRPT_STATE_NEW;
+ WARN_ON_ONCE(ioctx->recv_ioctx);
+ ioctx->n_rdma = 0;
+ ioctx->n_rw_ctx = 0;
+ ioctx->queue_status_only = false;
+ /*
+ * transport_init_se_cmd() does not initialize all fields, so do it
+ * here.
+ */
+ memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+ memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+ ioctx->cmd.map_tag = tag;
+ ioctx->cmd.map_cpu = cpu;
+
+ return ioctx;
+}
+
+/**
+ * srpt_abort_cmd - abort a SCSI command
+ * @ioctx: I/O context associated with the SCSI command.
+ */
+static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+
+ BUG_ON(!ioctx);
+
+ /*
+ * If the command is in a state where the target core is waiting for
+ * the ib_srpt driver, change the state to the next state.
+ */
+
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEED_DATA:
+ ioctx->state = SRPT_STATE_DATA_IN;
+ break;
+ case SRPT_STATE_CMD_RSP_SENT:
+ case SRPT_STATE_MGMT_RSP_SENT:
+ ioctx->state = SRPT_STATE_DONE;
+ break;
+ default:
+ WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
+ __func__, state);
+ break;
+ }
+
+ pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
+ ioctx->state, ioctx->cmd.tag);
+
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_MGMT:
+ case SRPT_STATE_DONE:
+ /*
+ * Do nothing - defer abort processing until
+ * srpt_queue_response() is invoked.
+ */
+ break;
+ case SRPT_STATE_NEED_DATA:
+ pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
+ transport_generic_request_failure(&ioctx->cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+ break;
+ case SRPT_STATE_CMD_RSP_SENT:
+ /*
+ * SRP_RSP sending failed or the SRP_RSP send completion has
+ * not been received in time.
+ */
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+ break;
+ case SRPT_STATE_MGMT_RSP_SENT:
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+ break;
+ default:
+ WARN(1, "Unexpected command state (%d)", state);
+ break;
+ }
+
+ return state;
+}
+
+/**
+ * srpt_rdma_read_done - RDMA read completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
+ * XXX: target_execute_cmd() used to be asynchronous, so unmapping the data
+ * that had been transferred via IB RDMA had to be postponed until the
+ * check_stop_free() callback. None of this is necessary anymore and should
+ * be cleaned up.
+ */
+static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srpt_rdma_ch *ch = wc->qp->qp_context;
+ struct srpt_send_ioctx *ioctx =
+ container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
+
+ WARN_ON(ioctx->n_rdma <= 0);
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ ioctx->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
+ ioctx, wc->status);
+ srpt_abort_cmd(ioctx);
+ return;
+ }
+
+ if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+ SRPT_STATE_DATA_IN))
+ target_execute_cmd(&ioctx->cmd);
+ else
+ pr_err("%s[%d]: wrong state = %d\n", __func__,
+ __LINE__, ioctx->state);
+}
+
+/**
+ * srpt_build_cmd_rsp - build a SRP_RSP response
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
+ * be built in the buffer ioctx->buf points at and hence this function will
+ * overwrite the request data.
+ * @tag: tag of the request for which this response is being generated.
+ * @status: value for the STATUS field of the SRP_RSP information unit.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response. See also SPC-2 for more information about sense data.
+ */
+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, u64 tag,
+ int status)
+{
+ struct se_cmd *cmd = &ioctx->cmd;
+ struct srp_rsp *srp_rsp;
+ const u8 *sense_data;
+ int sense_data_len, max_sense_len;
+ u32 resid = cmd->residual_count;
+
+ /*
+ * The lowest bit of all SAM-3 status codes is zero (see also
+ * paragraph 5.3 in SAM-3).
+ */
+ WARN_ON(status & 1);
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+
+ sense_data = ioctx->sense_data;
+ sense_data_len = ioctx->cmd.scsi_sense_length;
+ WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
+
+ memset(srp_rsp, 0, sizeof(*srp_rsp));
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta =
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+ srp_rsp->status = status;
+
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ }
+
+ if (sense_data_len) {
+ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+ if (sense_data_len > max_sense_len) {
+ pr_warn("truncated sense data from %d to %d bytes\n",
+ sense_data_len, max_sense_len);
+ sense_data_len = max_sense_len;
+ }
+
+ srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+ srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
+ memcpy(srp_rsp->data, sense_data, sense_data_len);
+ }
+
+ return sizeof(*srp_rsp) + sense_data_len;
+}
+
+/**
+ * srpt_build_tskmgmt_rsp - build a task management response
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context in which the SRP_RSP response will be built.
+ * @rsp_code: RSP_CODE that will be stored in the response.
+ * @tag: Tag of the request for which this response is being generated.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response.
+ */
+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ u8 rsp_code, u64 tag)
+{
+ struct srp_rsp *srp_rsp;
+ int resp_data_len;
+ int resp_len;
+
+ resp_data_len = 4;
+ resp_len = sizeof(*srp_rsp) + resp_data_len;
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+ memset(srp_rsp, 0, sizeof(*srp_rsp));
+
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta =
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+
+ srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+ srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+ srp_rsp->data[3] = rsp_code;
+
+ return resp_len;
+}
+
+static int srpt_check_stop_free(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
+
+ return target_put_sess_cmd(&ioctx->cmd);
+}
+
+/**
+ * srpt_handle_cmd - process a SRP_CMD information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
+ */
+static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct se_cmd *cmd;
+ struct srp_cmd *srp_cmd;
+ struct scatterlist *sg = NULL;
+ unsigned sg_cnt = 0;
+ u64 data_len;
+ enum dma_data_direction dir;
+ int rc;
+
+ BUG_ON(!send_ioctx);
+
+ srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
+ cmd = &send_ioctx->cmd;
+ cmd->tag = srp_cmd->tag;
+
+ switch (srp_cmd->task_attr) {
+ case SRP_CMD_SIMPLE_Q:
+ cmd->sam_task_attr = TCM_SIMPLE_TAG;
+ break;
+ case SRP_CMD_ORDERED_Q:
+ default:
+ cmd->sam_task_attr = TCM_ORDERED_TAG;
+ break;
+ case SRP_CMD_HEAD_OF_Q:
+ cmd->sam_task_attr = TCM_HEAD_TAG;
+ break;
+ case SRP_CMD_ACA:
+ cmd->sam_task_attr = TCM_ACA_TAG;
+ break;
+ }
+
+ rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
+ &sg, &sg_cnt, &data_len, ch->imm_data_offset);
+ if (rc) {
+ if (rc != -EAGAIN) {
+ pr_err("0x%llx: parsing SRP descriptor table failed.\n",
+ srp_cmd->tag);
+ }
+ goto busy;
+ }
+
+ rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
+ scsilun_to_int(&srp_cmd->lun), data_len,
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
+ srp_cmd->tag);
+ goto busy;
+ }
+
+ if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return;
+
+ target_submit(cmd);
+ return;
+
+busy:
+ target_send_busy(cmd);
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+ switch (fn) {
+ case SRP_TSK_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case SRP_TSK_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case SRP_TSK_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case SRP_TSK_LUN_RESET:
+ return TMR_LUN_RESET;
+ case SRP_TSK_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
+ */
+static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_tsk_mgmt *srp_tsk;
+ struct se_cmd *cmd;
+ struct se_session *sess = ch->sess;
+ int tcm_tmr;
+ int rc;
+
+ BUG_ON(!send_ioctx);
+
+ srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
+ cmd = &send_ioctx->cmd;
+
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
+ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
+ ch->sess);
+
+ srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ send_ioctx->cmd.tag = srp_tsk->tag;
+ tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+ rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
+ scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
+ GFP_KERNEL, srp_tsk->task_tag,
+ TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ cmd->se_tfo->queue_tm_rsp(cmd);
+ }
+}
+
+/**
+ * srpt_handle_new_iu - process a newly received information unit
+ * @ch: RDMA channel through which the information unit has been received.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
+ */
+static bool
+srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
+{
+ struct srpt_send_ioctx *send_ioctx = NULL;
+ struct srp_cmd *srp_cmd;
+ bool res = false;
+ u8 opcode;
+
+ BUG_ON(!ch);
+ BUG_ON(!recv_ioctx);
+
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto push;
+
+ ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
+ recv_ioctx->ioctx.dma,
+ recv_ioctx->ioctx.offset + srp_max_req_size,
+ DMA_FROM_DEVICE);
+
+ srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
+ opcode = srp_cmd->opcode;
+ if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
+ send_ioctx = srpt_get_send_ioctx(ch);
+ if (unlikely(!send_ioctx))
+ goto push;
+ }
+
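+ /* If this IU was queued on the wait list, dequeue it now that it is being processed. */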
+ if (!list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(!ch->processing_wait_list);
+ list_del_init(&recv_ioctx->wait_list);
+ }
+
+ switch (opcode) {
+ case SRP_CMD:
+ srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_TSK_MGMT:
+ srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_I_LOGOUT:
+ pr_err("Not yet implemented: SRP_I_LOGOUT\n");
+ break;
+ case SRP_CRED_RSP:
+ pr_debug("received SRP_CRED_RSP\n");
+ break;
+ case SRP_AER_RSP:
+ pr_debug("received SRP_AER_RSP\n");
+ break;
+ case SRP_RSP:
+ pr_err("Received SRP_RSP\n");
+ break;
+ default:
+ pr_err("received IU with unknown opcode 0x%x\n", opcode);
+ break;
+ }
+
+ if (!send_ioctx || !send_ioctx->recv_ioctx)
+ srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+ res = true;
+
+out:
+ return res;
+
+push:
+ if (list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(ch->processing_wait_list);
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ }
+ goto out;
+}
+
+static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srpt_rdma_ch *ch = wc->qp->qp_context;
+ struct srpt_recv_ioctx *ioctx =
+ container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
+
+ if (wc->status == IB_WC_SUCCESS) {
+ int req_lim;
+
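+ /* Each received IU consumes one SRP request credit. */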
+ req_lim = atomic_dec_return(&ch->req_lim);
+ if (unlikely(req_lim < 0))
+ pr_err("req_lim = %d < 0\n", req_lim);
+ ioctx->byte_len = wc->byte_len;
+ srpt_handle_new_iu(ch, ioctx);
+ } else {
+ pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
+ ioctx, wc->status);
+ }
+}
+
+/*
+ * This function must be called from the context in which RDMA completions are
+ * processed because it accesses the wait list without protection against
+ * access from other threads.
+ */
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
+{
+ struct srpt_recv_ioctx *recv_ioctx, *tmp;
+
+ WARN_ON_ONCE(ch->state == CH_CONNECTING);
+
+ if (list_empty(&ch->cmd_wait_list))
+ return;
+
+ WARN_ON_ONCE(ch->processing_wait_list);
+ ch->processing_wait_list = true;
+ list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
+ wait_list) {
+ if (!srpt_handle_new_iu(ch, recv_ioctx))
+ break;
+ }
+ ch->processing_wait_list = false;
+}
+
+/**
+ * srpt_send_done - send completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
+ * Note: Although this has not yet been observed during tests, at least in
+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
+ * value in each response is set to one, and it is possible that this response
+ * makes the initiator send a new request before the send completion for that
+ * response has been processed. This could e.g. happen if the call to
+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
+ * if IB retransmission causes generation of the send completion to be
+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
+ * are queued on cmd_wait_list. The code below processes these delayed
+ * requests one at a time.
+ */
+static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srpt_rdma_ch *ch = wc->qp->qp_context;
+ struct srpt_send_ioctx *ioctx =
+ container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
+ enum srpt_command_state state;
+
+ state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+
+ WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
+ state != SRPT_STATE_MGMT_RSP_SENT);
+
+ atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
+
+ if (wc->status != IB_WC_SUCCESS)
+ pr_info("sending response for ioctx 0x%p failed with status %d\n",
+ ioctx, wc->status);
+
+ if (state != SRPT_STATE_DONE) {
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+ } else {
+ pr_err("IB completion has been received too late for wr_id = %u.\n",
+ ioctx->ioctx.index);
+ }
+
+ srpt_process_wait_list(ch);
+}
+
+/**
+ * srpt_create_ch_ib - create receive and send completion queues
+ * @ch: SRPT RDMA channel.
+ */
+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_init_attr *qp_init;
+ struct srpt_port *sport = ch->sport;
+ struct srpt_device *sdev = sport->sdev;
+ const struct ib_device_attr *attrs = &sdev->device->attrs;
+ int sq_size = sport->port_attrib.srp_sq_size;
+ int i, ret;
+
+ WARN_ON(ch->rq_size < 1);
+
+ ret = -ENOMEM;
+ qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
+ if (!qp_init)
+ goto out;
+
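+ /* On QP creation failure, retry with a smaller send queue (see below). */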
+retry:
+ ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
+ IB_POLL_WORKQUEUE);
+ if (IS_ERR(ch->cq)) {
+ ret = PTR_ERR(ch->cq);
+ pr_err("failed to create CQ cqe= %d ret= %d\n",
+ ch->rq_size + sq_size, ret);
+ goto out;
+ }
+ ch->cq_size = ch->rq_size + sq_size;
+
+ qp_init->qp_context = (void *)ch;
+ qp_init->event_handler
+ = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->send_cq = ch->cq;
+ qp_init->recv_cq = ch->cq;
+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_init->qp_type = IB_QPT_RC;
+ /*
+ * We divide up our send queue size into half SEND WRs to send the
+ * completions, and half R/W contexts to actually do the RDMA
+ * READ/WRITE transfers. Note that we need to allocate CQ slots for
+ * both, as RDMA contexts will also post completions for the
+ * RDMA READ case.
+ */
+ qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+ qp_init->cap.max_rdma_ctxs = sq_size / 2;
+ qp_init->cap.max_send_sge = attrs->max_send_sge;
+ qp_init->cap.max_recv_sge = 1;
+ qp_init->port_num = ch->sport->port;
+ if (sdev->use_srq)
+ qp_init->srq = sdev->srq;
+ else
+ qp_init->cap.max_recv_wr = ch->rq_size;
+
+ if (ch->using_rdma_cm) {
+ ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
+ ch->qp = ch->rdma_cm.cm_id->qp;
+ } else {
+ ch->qp = ib_create_qp(sdev->pd, qp_init);
+ if (!IS_ERR(ch->qp)) {
+ ret = srpt_init_ch_qp(ch, ch->qp);
+ if (ret)
+ ib_destroy_qp(ch->qp);
+ } else {
+ ret = PTR_ERR(ch->qp);
+ }
+ }
+ if (ret) {
+ bool retry = sq_size > MIN_SRPT_SQ_SIZE;
+
+ if (retry) {
+ pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
+ sq_size, ret);
+ ib_cq_pool_put(ch->cq, ch->cq_size);
+ sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
+ goto retry;
+ } else {
+ pr_err("failed to create queue pair with sq_size = %d (%d)\n",
+ sq_size, ret);
+ goto err_destroy_cq;
+ }
+ }
+
+ atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
+ __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+ qp_init->cap.max_send_wr, ch);
+
+ if (!sdev->use_srq)
+ for (i = 0; i < ch->rq_size; i++)
+ srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
+
+out:
+ kfree(qp_init);
+ return ret;
+
+err_destroy_cq:
+ ch->qp = NULL;
+ ib_cq_pool_put(ch->cq, ch->cq_size);
+ goto out;
+}
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
+ ib_destroy_qp(ch->qp);
+ ib_cq_pool_put(ch->cq, ch->cq_size);
+}
+
+/**
+ * srpt_close_ch - close a RDMA channel
+ * @ch: SRPT RDMA channel.
+ *
+ * Make sure all resources associated with the channel will be deallocated at
+ * an appropriate time.
+ *
+ * Returns true if and only if the channel state has been modified into
+ * CH_DRAINING.
+ */
+static bool srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ int ret;
+
+ if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+ pr_debug("%s: already closed\n", ch->sess_name);
+ return false;
+ }
+
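+ /* Hold a reference so the channel cannot be freed while being drained. */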
+ kref_get(&ch->kref);
+
+ ret = srpt_ch_qp_err(ch);
+ if (ret < 0)
+ pr_err("%s-%d: changing queue pair into error state failed: %d\n",
+ ch->sess_name, ch->qp->qp_num, ret);
+
+ ret = srpt_zerolength_write(ch);
+ if (ret < 0) {
+ pr_err("%s-%d: queuing zero-length write failed: %d\n",
+ ch->sess_name, ch->qp->qp_num, ret);
+ if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+ schedule_work(&ch->release_work);
+ else
+ WARN_ON_ONCE(true);
+ }
+
+ kref_put(&ch->kref, srpt_free_ch);
+
+ return true;
+}
+
+/*
+ * Change the channel state into CH_DISCONNECTING. If a channel has not yet
+ * reached the connected state, close it. If a channel is in the connected
+ * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
+ * the responsibility of the caller to ensure that this function is not
+ * invoked concurrently with the code that accepts a connection. This means
+ * that this function must either be invoked from inside a CM callback
+ * function or that it must be invoked with the srpt_port.mutex held.
+ */
+static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
+{
+ int ret;
+
+ if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
+ return -ENOTCONN;
+
+ if (ch->using_rdma_cm) {
+ ret = rdma_disconnect(ch->rdma_cm.cm_id);
+ } else {
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
+ if (ret < 0)
+ ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
+ }
+
+ if (ret < 0 && srpt_close_ch(ch))
+ ret = 0;
+
+ return ret;
+}
+
+/* Send DREQ and wait for DREP. */
+static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+{
+ DECLARE_COMPLETION_ONSTACK(closed);
+ struct srpt_port *sport = ch->sport;
+
+ pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
+ ch->state);
+
+ ch->closed = &closed;
+
+ mutex_lock(&sport->mutex);
+ srpt_disconnect_ch(ch);
+ mutex_unlock(&sport->mutex);
+
+ while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
+ pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
+ ch->sess_name, ch->qp->qp_num, ch->state);
+}
+
+static void __srpt_close_all_ch(struct srpt_port *sport)
+{
+ struct srpt_nexus *nexus;
+ struct srpt_rdma_ch *ch;
+
+ lockdep_assert_held(&sport->mutex);
+
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ dev_name(&sport->sdev->device->dev),
+ sport->port);
+ srpt_close_ch(ch);
+ }
+ }
+}
+
+/*
+ * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
+ * it does not yet exist.
+ */
+static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
+ const u8 i_port_id[16],
+ const u8 t_port_id[16])
+{
+ struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
+
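+ /*
+ * Search under the lock; if the nexus is missing, allocate one outside
+ * the lock and retry, since another thread may have added it meanwhile.
+ */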
+ for (;;) {
+ mutex_lock(&sport->mutex);
+ list_for_each_entry(n, &sport->nexus_list, entry) {
+ if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
+ memcmp(n->t_port_id, t_port_id, 16) == 0) {
+ nexus = n;
+ break;
+ }
+ }
+ if (!nexus && tmp_nexus) {
+ list_add_tail_rcu(&tmp_nexus->entry,
+ &sport->nexus_list);
+ swap(nexus, tmp_nexus);
+ }
+ mutex_unlock(&sport->mutex);
+
+ if (nexus)
+ break;
+ tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!tmp_nexus) {
+ nexus = ERR_PTR(-ENOMEM);
+ break;
+ }
+ INIT_LIST_HEAD(&tmp_nexus->ch_list);
+ memcpy(tmp_nexus->i_port_id, i_port_id, 16);
+ memcpy(tmp_nexus->t_port_id, t_port_id, 16);
+ }
+
+ kfree(tmp_nexus);
+
+ return nexus;
+}
+
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sport->mutex)
+{
+ lockdep_assert_held(&sport->mutex);
+
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (!enabled)
+ __srpt_close_all_ch(sport);
+}
+
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+ if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+ complete(sport->freed_channels);
+}
+
+static void srpt_free_ch(struct kref *kref)
+{
+ struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+
+ srpt_drop_sport_ref(ch->sport);
+ kfree_rcu(ch, rcu);
+}
+
+/*
+ * Shut down the SCSI target session, tell the connection manager to
+ * disconnect the associated RDMA channel, transition the QP to the error
+ * state and remove the channel from the channel list. This function is
+ * typically called from inside srpt_zerolength_write_done(). Concurrent
+ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
+ * as long as the channel is on sport->nexus_list.
+ */
+static void srpt_release_channel_work(struct work_struct *w)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ struct se_session *se_sess;
+
+ ch = container_of(w, struct srpt_rdma_ch, release_work);
+ pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
+
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+
+ se_sess = ch->sess;
+ BUG_ON(!se_sess);
+
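+ /* Stop the session and wait for the target core to finish all outstanding commands. */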
+ target_stop_session(se_sess);
+ target_wait_for_sess_cmds(se_sess);
+
+ target_remove_session(se_sess);
+ ch->sess = NULL;
+
+ if (ch->using_rdma_cm)
+ rdma_destroy_id(ch->rdma_cm.cm_id);
+ else
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+
+ sport = ch->sport;
+ mutex_lock(&sport->mutex);
+ list_del_rcu(&ch->list);
+ mutex_unlock(&sport->mutex);
+
+ if (ch->closed)
+ complete(ch->closed);
+
+ srpt_destroy_ch_ib(ch);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+ kmem_cache_destroy(ch->rsp_buf_cache);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ sdev, ch->rq_size,
+ ch->req_buf_cache, DMA_FROM_DEVICE);
+
+ kmem_cache_destroy(ch->req_buf_cache);
+
+ kref_put(&ch->kref, srpt_free_ch);
+}
+
+/**
+ * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
+ * @sdev: HCA through which the login request was received.
+ * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
+ * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
+ * @port_num: Port through which the REQ message was received.
+ * @pkey: P_Key of the incoming connection.
+ * @req: SRP login request.
+ * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
+ * the login request.
+ *
+ * Ownership of the cm_id is transferred to the target session if this
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
+ */
+static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ struct ib_cm_id *ib_cm_id,
+ struct rdma_cm_id *rdma_cm_id,
+ u8 port_num, __be16 pkey,
+ const struct srp_login_req *req,
+ const char *src_addr)
+{
+ struct srpt_port *sport = &sdev->port[port_num - 1];
+ struct srpt_nexus *nexus;
+ struct srp_login_rsp *rsp = NULL;
+ struct srp_login_rej *rej = NULL;
+ union {
+ struct rdma_conn_param rdma_cm;
+ struct ib_cm_rep_param ib_cm;
+ } *rep_param = NULL;
+ struct srpt_rdma_ch *ch = NULL;
+ char i_port_id[36];
+ u32 it_iu_len;
+ int i, tag_num, tag_size, ret;
+ struct srpt_tpg *stpg;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ it_iu_len = be32_to_cpu(req->req_it_iu_len);
+
+ pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
+ req->initiator_port_id, req->target_port_id, it_iu_len,
+ port_num, &sport->gid, be16_to_cpu(pkey));
+
+ nexus = srpt_get_nexus(sport, req->initiator_port_id,
+ req->target_port_id);
+ if (IS_ERR(nexus)) {
+ ret = PTR_ERR(nexus);
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ rej = kzalloc(sizeof(*rej), GFP_KERNEL);
+ rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
+ if (!rsp || !rej || !rep_param)
+ goto out;
+
+ ret = -EINVAL;
+ if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
+ it_iu_len, 64, srp_max_req_size);
+ goto reject;
+ }
+
+ if (!sport->enabled) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
+ dev_name(&sport->sdev->device->dev), port_num);
+ goto reject;
+ }
+
+ if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
+ || *(__be64 *)(req->target_port_id + 8) !=
+ cpu_to_be64(srpt_service_guid)) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
+ goto reject;
+ }
+
+ ret = -ENOMEM;
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
+ goto reject;
+ }
+
+ kref_init(&ch->kref);
+ ch->pkey = be16_to_cpu(pkey);
+ ch->nexus = nexus;
+ ch->zw_cqe.done = srpt_zerolength_write_done;
+ INIT_WORK(&ch->release_work, srpt_release_channel_work);
+ ch->sport = sport;
+ if (rdma_cm_id) {
+ ch->using_rdma_cm = true;
+ ch->rdma_cm.cm_id = rdma_cm_id;
+ rdma_cm_id->context = ch;
+ } else {
+ ch->ib_cm.cm_id = ib_cm_id;
+ ib_cm_id->context = ch;
+ }
+ /*
+ * ch->rq_size should be at least as large as the initiator queue
+ * depth so that the initiator driver does not have to report
+ * QUEUE_FULL to the SCSI mid-layer.
+ */
+ ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+ spin_lock_init(&ch->spinlock);
+ ch->state = CH_CONNECTING;
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+ ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
+ 512, 0, NULL);
+ if (!ch->rsp_buf_cache)
+ goto free_ch;
+
+ ch->ioctx_ring = (struct srpt_send_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_ring[0]),
+ ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ goto free_rsp_cache;
+ }
+
+ for (i = 0; i < ch->rq_size; i++)
+ ch->ioctx_ring[i]->ch = ch;
+ if (!sdev->use_srq) {
+ u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
+ be16_to_cpu(req->imm_data_offset) : 0;
+ u16 alignment_offset;
+ u32 req_sz;
+
+ if (req->req_flags & SRP_IMMED_REQUESTED)
+ pr_debug("imm_data_offset = %d\n",
+ be16_to_cpu(req->imm_data_offset));
+ if (imm_data_offset >= sizeof(struct srp_cmd)) {
+ ch->imm_data_offset = imm_data_offset;
+ rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
+ } else {
+ ch->imm_data_offset = 0;
+ }
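+ /*
+ * Pad the start of each receive buffer so that immediate data
+ * following the SRP_CMD header starts on a 512-byte boundary, the
+ * typical block-layer alignment requirement.
+ */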
+ alignment_offset = round_up(imm_data_offset, 512) -
+ imm_data_offset;
+ req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
+ ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
+ 512, 0, NULL);
+ if (!ch->req_buf_cache)
+ goto free_rsp_ring;
+
+ ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_recv_ring[0]),
+ ch->req_buf_cache,
+ alignment_offset,
+ DMA_FROM_DEVICE);
+ if (!ch->ioctx_recv_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
+ rej->reason =
+ cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ goto free_recv_cache;
+ }
+ for (i = 0; i < ch->rq_size; i++)
+ INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
+ }
+
+ ret = srpt_create_ch_ib(ch);
+ if (ret) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
+ goto free_recv_ring;
+ }
+
+ strscpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)nexus->i_port_id),
+ be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
+
+ pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+ i_port_id);
+
+ tag_num = ch->rq_size;
+ tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
+
+ if (sport->guid_id) {
+ mutex_lock(&sport->guid_id->mutex);
+ list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ tag_size, TARGET_PROT_NORMAL,
+ ch->sess_name, ch, NULL);
+ }
+ mutex_unlock(&sport->guid_id->mutex);
+ }
+
+ if (sport->gid_id) {
+ mutex_lock(&sport->gid_id->mutex);
+ list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ tag_size, TARGET_PROT_NORMAL, i_port_id,
+ ch, NULL);
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
+ tag_size, TARGET_PROT_NORMAL,
+ i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->gid_id->mutex);
+ }
+
+ if (IS_ERR_OR_NULL(ch->sess)) {
+ WARN_ON_ONCE(ch->sess == NULL);
+ ret = PTR_ERR(ch->sess);
+ ch->sess = NULL;
+ pr_info("Rejected login for initiator %s: ret = %d.\n",
+ ch->sess_name, ret);
+ rej->reason = cpu_to_be32(ret == -ENOMEM ?
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto destroy_ib;
+ }
+
+ /*
+ * Once a session has been created destruction of srpt_rdma_ch objects
+ * will decrement sport->refcount. Hence increment sport->refcount now.
+ */
+ atomic_inc(&sport->refcount);
+
+ mutex_lock(&sport->mutex);
+
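+ /*
+ * A login with the MULTI-CHANNEL ACTION field set to 'single'
+ * requests that all existing channels for this I_T nexus be
+ * terminated before the new channel is established (see also the
+ * SRP_MTCH_ACTION constant in ib_srpt.h).
+ */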
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ struct srpt_rdma_ch *ch2;
+
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch2) < 0)
+ continue;
+ pr_info("Relogin - closed existing channel %s\n",
+ ch2->sess_name);
+ rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ } else {
+ rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+ }
+
+ list_add_tail_rcu(&ch->list, &nexus->ch_list);
+
+ if (!sport->enabled) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ dev_name(&sdev->device->dev), port_num);
+ mutex_unlock(&sport->mutex);
+ ret = -EINVAL;
+ goto reject;
+ }
+
+ mutex_unlock(&sport->mutex);
+
+ ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
+ ret);
+ goto reject;
+ }
+
+ pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
+ ch->sess_name, ch);
+
+ /* create srp_login_response */
+ rsp->opcode = SRP_LOGIN_RSP;
+ rsp->tag = req->tag;
+ rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
+ rsp->max_ti_iu_len = req->req_it_iu_len;
+ ch->max_ti_iu_len = it_iu_len;
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
+ rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
+ atomic_set(&ch->req_lim, ch->rq_size);
+ atomic_set(&ch->req_lim_delta, 0);
+
+ /* create cm reply */
+ if (ch->using_rdma_cm) {
+ rep_param->rdma_cm.private_data = (void *)rsp;
+ rep_param->rdma_cm.private_data_len = sizeof(*rsp);
+ rep_param->rdma_cm.rnr_retry_count = 7;
+ rep_param->rdma_cm.flow_control = 1;
+ rep_param->rdma_cm.responder_resources = 4;
+ rep_param->rdma_cm.initiator_depth = 4;
+ } else {
+ rep_param->ib_cm.qp_num = ch->qp->qp_num;
+ rep_param->ib_cm.private_data = (void *)rsp;
+ rep_param->ib_cm.private_data_len = sizeof(*rsp);
+ rep_param->ib_cm.rnr_retry_count = 7;
+ rep_param->ib_cm.flow_control = 1;
+ rep_param->ib_cm.failover_accepted = 0;
+ rep_param->ib_cm.srq = 1;
+ rep_param->ib_cm.responder_resources = 4;
+ rep_param->ib_cm.initiator_depth = 4;
+ }
+
+ /*
+ * Hold the sport mutex while accepting a connection to prevent
+ * srpt_disconnect_ch() from being invoked concurrently with this
+ * code.
+ */
+ mutex_lock(&sport->mutex);
+ if (sport->enabled && ch->state == CH_CONNECTING) {
+ if (ch->using_rdma_cm)
+ ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
+ else
+ ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
+ } else {
+ ret = -EINVAL;
+ }
+ mutex_unlock(&sport->mutex);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ goto reject;
+ default:
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
+ ret);
+ goto reject;
+ }
+
+ goto out;
+
+destroy_ib:
+ srpt_destroy_ch_ib(ch);
+
+free_recv_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->req_buf_cache, DMA_FROM_DEVICE);
+
+free_recv_cache:
+ kmem_cache_destroy(ch->req_buf_cache);
+
+free_rsp_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+free_rsp_cache:
+ kmem_cache_destroy(ch->rsp_buf_cache);
+
+free_ch:
+ if (rdma_cm_id)
+ rdma_cm_id->context = NULL;
+ else
+ ib_cm_id->context = NULL;
+ kfree(ch);
+ ch = NULL;
+
+ WARN_ON_ONCE(ret == 0);
+
+reject:
+ pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
+ rej->opcode = SRP_LOGIN_REJ;
+ rej->tag = req->tag;
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
+
+ if (rdma_cm_id)
+ rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
+ else
+ ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ rej, sizeof(*rej));
+
+ if (ch && ch->sess) {
+ srpt_close_ch(ch);
+ /*
+ * Tell the caller not to free cm_id since
+ * srpt_release_channel_work() will do that.
+ */
+ ret = 0;
+ }
+
+out:
+ kfree(rep_param);
+ kfree(rsp);
+ kfree(rej);
+
+ return ret;
+}
+
+static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
+ const struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ char sguid[40];
+
+ srpt_format_guid(sguid, sizeof(sguid),
+ &param->primary_path->dgid.global.interface_id);
+
+ return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
+ param->primary_path->pkey,
+ private_data, sguid);
+}
+
+static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srpt_device *sdev;
+ struct srp_login_req req;
+ const struct srp_login_req_rdma *req_rdma;
+ struct sa_path_rec *path_rec = cm_id->route.path_rec;
+ char src_addr[40];
+
+ sdev = ib_get_client_data(cm_id->device, &srpt_client);
+ if (!sdev)
+ return -ECONNREFUSED;
+
+ if (event->param.conn.private_data_len < sizeof(*req_rdma))
+ return -EINVAL;
+
+ /* Transform srp_login_req_rdma into srp_login_req. */
+ req_rdma = event->param.conn.private_data;
+ memset(&req, 0, sizeof(req));
+ req.opcode = req_rdma->opcode;
+ req.tag = req_rdma->tag;
+ req.req_it_iu_len = req_rdma->req_it_iu_len;
+ req.req_buf_fmt = req_rdma->req_buf_fmt;
+ req.req_flags = req_rdma->req_flags;
+ memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
+ memcpy(req.target_port_id, req_rdma->target_port_id, 16);
+ req.imm_data_offset = req_rdma->imm_data_offset;
+
+ snprintf(src_addr, sizeof(src_addr), "%pIS",
+ &cm_id->route.addr.src_addr);
+
+ return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
+ path_rec ? path_rec->pkey : 0, &req, src_addr);
+}
+
+static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
+ enum ib_cm_rej_reason reason,
+ const u8 *private_data,
+ u8 private_data_len)
+{
+ char *priv = NULL;
+ int i;
+
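+ /* Format the reject private data as space-separated hex bytes. */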
+ if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
+ GFP_KERNEL))) {
+ for (i = 0; i < private_data_len; i++)
+ sprintf(priv + 3 * i, " %02x", private_data[i]);
+ }
+ pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
+ ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
+ "; private data" : "", priv ? priv : " (?)");
+ kfree(priv);
+}
+
+/**
+ * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
+ * @ch: SRPT RDMA channel.
+ *
+ * An RTU (ready to use) message indicates that the connection has been
+ * established and that the recipient may begin transmitting.
+ */
+static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
+{
+ int ret;
+
+ ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
+ if (ret < 0) {
+ pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
+ ch->qp->qp_num);
+ srpt_close_ch(ch);
+ return;
+ }
+
+ /*
+ * Note: calling srpt_close_ch() if the transition to the LIVE state
+ * fails is not necessary since a failed transition means that
+ * srpt_close_ch() has already been invoked from another thread.
+ */
+ if (!srpt_set_ch_state(ch, CH_LIVE)) {
+ pr_err("%s-%d: channel transition to LIVE state failed\n",
+ ch->sess_name, ch->qp->qp_num);
+ return;
+ }
+
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
+}
+
+/**
+ * srpt_cm_handler - IB connection manager callback function
+ * @cm_id: IB/CM connection identifier.
+ * @event: IB/CM event.
+ *
+ * A non-zero return value will cause the caller to destroy the CM ID.
+ *
+ * Note: srpt_cm_handler() must only return a non-zero value when transferring
+ * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
+ * a non-zero value in any other case will trigger a race with the
+ * ib_destroy_cm_id() call in srpt_release_channel().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct srpt_rdma_ch *ch = cm_id->context;
+ int ret;
+
+ ret = 0;
+ switch (event->event) {
+ case IB_CM_REQ_RECEIVED:
+ ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
+ break;
+ case IB_CM_REJ_RECEIVED:
+ srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
+ event->private_data,
+ IB_CM_REJ_PRIVATE_DATA_SIZE);
+ break;
+ case IB_CM_RTU_RECEIVED:
+ case IB_CM_USER_ESTABLISHED:
+ srpt_cm_rtu_recv(ch);
+ break;
+ case IB_CM_DREQ_RECEIVED:
+ srpt_disconnect_ch(ch);
+ break;
+ case IB_CM_DREP_RECEIVED:
+ pr_info("Received CM DREP message for ch %s-%d.\n",
+ ch->sess_name, ch->qp->qp_num);
+ srpt_close_ch(ch);
+ break;
+ case IB_CM_TIMEWAIT_EXIT:
+ pr_info("Received CM TimeWait exit for ch %s-%d.\n",
+ ch->sess_name, ch->qp->qp_num);
+ srpt_close_ch(ch);
+ break;
+ case IB_CM_REP_ERROR:
+ pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
+ ch->qp->qp_num);
+ break;
+ case IB_CM_DREQ_ERROR:
+ pr_info("Received CM DREQ ERROR event.\n");
+ break;
+ case IB_CM_MRA_RECEIVED:
+ pr_info("Received CM MRA event\n");
+ break;
+ default:
+ pr_err("received unrecognized CM event %d\n", event->event);
+ break;
+ }
+
+ return ret;
+}
+
+static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srpt_rdma_ch *ch = cm_id->context;
+ int ret = 0;
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ ret = srpt_rdma_cm_req_recv(cm_id, event);
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ srpt_cm_rej_recv(ch, event->status,
+ event->param.conn.private_data,
+ event->param.conn.private_data_len);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ srpt_cm_rtu_recv(ch);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (ch->state < CH_DISCONNECTING)
+ srpt_disconnect_ch(ch);
+ else
+ srpt_close_ch(ch);
+ break;
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ srpt_close_ch(ch);
+ break;
+ case RDMA_CM_EVENT_UNREACHABLE:
+ pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
+ ch->qp->qp_num);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ break;
+ default:
+ pr_err("received unrecognized RDMA CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * srpt_write_pending - Start data transfer from initiator to target (write).
+ */
+static int srpt_write_pending(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx =
+ container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct ib_send_wr *first_wr = NULL;
+ struct ib_cqe *cqe = &ioctx->rdma_cqe;
+ enum srpt_command_state new_state;
+ int ret, i;
+
+ if (ioctx->recv_ioctx) {
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+ target_execute_cmd(&ioctx->cmd);
+ return 0;
+ }
+
+ new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
+ WARN_ON(new_state == SRPT_STATE_DONE);
+
+ if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
+ pr_warn("%s: IB send queue full (needed %d)\n",
+ __func__, ioctx->n_rdma);
+ ret = -ENOMEM;
+ goto out_undo;
+ }
+
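+ /*
+ * Chain the RDMA READ work requests of all rw contexts into a single
+ * list. Only the last work request is signaled (it carries the cqe),
+ * so srpt_rdma_read_done() runs once, after all reads have completed.
+ */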
+ cqe->done = srpt_rdma_read_done;
+ for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
+ cqe, first_wr);
+ cqe = NULL;
+ }
+
+ ret = ib_post_send(ch->qp, first_wr, NULL);
+ if (ret) {
+ pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
+ __func__, ret, ioctx->n_rdma,
+ atomic_read(&ch->sq_wr_avail));
+ goto out_undo;
+ }
+
+ return 0;
+out_undo:
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ return ret;
+}
+
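+/*
+ * Map a TCM task management status code onto the corresponding SRP task
+ * management status code (see the SRP_TSK_MGMT_* constants in ib_srpt.h).
+ */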
+static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
+{
+ switch (tcm_mgmt_status) {
+ case TMR_FUNCTION_COMPLETE:
+ return SRP_TSK_MGMT_SUCCESS;
+ case TMR_FUNCTION_REJECTED:
+ return SRP_TSK_MGMT_FUNC_NOT_SUPP;
+ }
+ return SRP_TSK_MGMT_FAILED;
+}
+
+/**
+ * srpt_queue_response - transmit the response to a SCSI command
+ * @cmd: SCSI target command.
+ *
+ * Callback function called by the TCM core. Must not block since it can be
+ * invoked on the context of the IB completion handler.
+ */
+static void srpt_queue_response(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx =
+ container_of(cmd, struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct srpt_device *sdev = ch->sport->sdev;
+ struct ib_send_wr send_wr, *first_wr = &send_wr;
+ struct ib_sge sge;
+ enum srpt_command_state state;
+ int resp_len, ret, i;
+ u8 srp_tm_status;
+
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ ioctx->state = SRPT_STATE_CMD_RSP_SENT;
+ break;
+ case SRPT_STATE_MGMT:
+ ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
+ break;
+ default:
+ WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
+ ch, ioctx->ioctx.index, ioctx->state);
+ break;
+ }
+
+ if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
+ return;
+
+ /* For read commands, transfer the data to the initiator. */
+ if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
+ ioctx->cmd.data_length &&
+ !ioctx->queue_status_only) {
+ for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
+ ch->sport->port, NULL, first_wr);
+ }
+ }
+
+ if (state != SRPT_STATE_MGMT)
+ resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
+ cmd->scsi_status);
+ else {
+ srp_tm_status
+ = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
+ resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
+ ioctx->cmd.tag);
+ }
+
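+ /* Account for the request credit this response returns to the initiator. */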
+ atomic_inc(&ch->req_lim);
+
+ if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
+ &ch->sq_wr_avail) < 0)) {
+ pr_warn("%s: IB send queue full (needed %d)\n",
+ __func__, ioctx->n_rdma);
+ goto out;
+ }
+
+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
+ DMA_TO_DEVICE);
+
+ sge.addr = ioctx->ioctx.dma;
+ sge.length = resp_len;
+ sge.lkey = sdev->lkey;
+
+ ioctx->ioctx.cqe.done = srpt_send_done;
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &ioctx->ioctx.cqe;
+ send_wr.sg_list = &sge;
+ send_wr.num_sge = 1;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, first_wr, NULL);
+ if (ret < 0) {
+ pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
+ __func__, ioctx->cmd.tag, ret);
+ goto out;
+ }
+
+ return;
+
+out:
+ atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
+ atomic_dec(&ch->req_lim);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ target_put_sess_cmd(&ioctx->cmd);
+}
+
+static int srpt_queue_data_in(struct se_cmd *cmd)
+{
+ srpt_queue_response(cmd);
+ return 0;
+}
+
+static void srpt_queue_tm_rsp(struct se_cmd *cmd)
+{
+ srpt_queue_response(cmd);
+}
+
+/*
+ * This function is called for aborted commands if no response is sent to the
+ * initiator. Make sure that the credits freed by aborting a command are
+ * returned to the initiator the next time a response is sent by incrementing
+ * ch->req_lim_delta.
+ */
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+
+ atomic_inc(&ch->req_lim_delta);
+}
+
+static int srpt_queue_status(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ BUG_ON(ioctx->sense_data != cmd->sense_buffer);
+ if (cmd->se_cmd_flags &
+ (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
+ WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
+ ioctx->queue_status_only = true;
+ srpt_queue_response(cmd);
+ return 0;
+}
+
+static void srpt_refresh_port_work(struct work_struct *work)
+{
+ struct srpt_port *sport = container_of(work, struct srpt_port, work);
+
+ srpt_refresh_port(sport);
+}
+
+/**
+ * srpt_release_sport - disable login and wait for associated channels
+ * @sport: SRPT HCA port.
+ */
+static int srpt_release_sport(struct srpt_port *sport)
+{
+ DECLARE_COMPLETION_ONSTACK(c);
+ struct srpt_nexus *nexus, *next_n;
+ struct srpt_rdma_ch *ch;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ sport->freed_channels = &c;
+
+ mutex_lock(&sport->mutex);
+ srpt_set_enabled(sport, false);
+ mutex_unlock(&sport->mutex);
+
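+ /*
+ * Wait until the last channel reference has been dropped. Log the
+ * remaining sessions and their channel states every five seconds.
+ */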
+ while (atomic_read(&sport->refcount) > 0 &&
+ wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ atomic_read(&sport->refcount));
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ pr_info("%s-%d: state %s\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ mutex_lock(&sport->mutex);
+ list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
+ list_del(&nexus->entry);
+ kfree_rcu(nexus, rcu);
+ }
+ mutex_unlock(&sport->mutex);
+
+ return 0;
+}
+
+struct port_and_port_id {
+ struct srpt_port *sport;
+ struct srpt_port_id **port_id;
+};
+
+static struct port_and_port_id __srpt_lookup_port(const char *name)
+{
+ struct ib_device *dev;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ int i;
+
+ list_for_each_entry(sdev, &srpt_dev_list, list) {
+ dev = sdev->device;
+ if (!dev)
+ continue;
+
+ for (i = 0; i < dev->phys_port_cnt; i++) {
+ sport = &sdev->port[i];
+
+ if (strcmp(sport->guid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->guid_id};
+ }
+ if (strcmp(sport->gid_name, name) == 0) {
+ kref_get(&sdev->refcnt);
+ return (struct port_and_port_id){
+ sport, &sport->gid_id};
+ }
+ }
+ }
+
+ return (struct port_and_port_id){};
+}
+
+/**
+ * srpt_lookup_port() - Look up an RDMA port by name
+ * @name: ASCII port name
+ *
+ * Increments the srpt_device reference count if a matching port is found.
+ * The caller must drop that reference by calling srpt_sdev_put().
+ */
+static struct port_and_port_id srpt_lookup_port(const char *name)
+{
+ struct port_and_port_id papi;
+
+ spin_lock(&srpt_dev_lock);
+ papi = __srpt_lookup_port(name);
+ spin_unlock(&srpt_dev_lock);
+
+ return papi;
+}
+
+static void srpt_free_srq(struct srpt_device *sdev)
+{
+ if (!sdev->srq)
+ return;
+
+ ib_destroy_srq(sdev->srq);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, sdev->req_buf_cache,
+ DMA_FROM_DEVICE);
+ kmem_cache_destroy(sdev->req_buf_cache);
+ sdev->srq = NULL;
+}
+
+static int srpt_alloc_srq(struct srpt_device *sdev)
+{
+ struct ib_srq_init_attr srq_attr = {
+ .event_handler = srpt_srq_event,
+ .srq_context = (void *)sdev,
+ .attr.max_wr = sdev->srq_size,
+ .attr.max_sge = 1,
+ .srq_type = IB_SRQT_BASIC,
+ };
+ struct ib_device *device = sdev->device;
+ struct ib_srq *srq;
+ int i;
+
+ WARN_ON_ONCE(sdev->srq);
+ srq = ib_create_srq(sdev->pd, &srq_attr);
+ if (IS_ERR(srq)) {
+ pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
+ return PTR_ERR(srq);
+ }
+
+ pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
+ sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
+
+ sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
+ srp_max_req_size, 0, 0, NULL);
+ if (!sdev->req_buf_cache)
+ goto free_srq;
+
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring)
+ goto free_cache;
+
+ sdev->use_srq = true;
+ sdev->srq = srq;
+
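+ /* Pre-post one receive work request for every SRQ entry. */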
+ for (i = 0; i < sdev->srq_size; ++i) {
+ INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
+ srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+ }
+
+ return 0;
+
+free_cache:
+ kmem_cache_destroy(sdev->req_buf_cache);
+
+free_srq:
+ ib_destroy_srq(srq);
+ return -ENOMEM;
+}
+
+static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
+{
+ struct ib_device *device = sdev->device;
+ int ret = 0;
+
+ if (!use_srq) {
+ srpt_free_srq(sdev);
+ sdev->use_srq = false;
+ } else if (use_srq && !sdev->srq) {
+ ret = srpt_alloc_srq(sdev);
+ }
+ pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
+ dev_name(&device->dev), sdev->use_srq, ret);
+ return ret;
+}
+
+static void srpt_free_sdev(struct kref *refcnt)
+{
+ struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
+
+ kfree(sdev);
+}
+
+static void srpt_sdev_put(struct srpt_device *sdev)
+{
+ kref_put(&sdev->refcnt, srpt_free_sdev);
+}
+
+/**
+ * srpt_add_one - InfiniBand device addition callback function
+ * @device: Describes a HCA.
+ */
+static int srpt_add_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ int ret;
+ u32 i;
+
+ pr_debug("device = %p\n", device);
+
+ sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
+ GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ kref_init(&sdev->refcnt);
+ sdev->device = device;
+ mutex_init(&sdev->sdev_mutex);
+
+ sdev->pd = ib_alloc_pd(device, 0);
+ if (IS_ERR(sdev->pd)) {
+ ret = PTR_ERR(sdev->pd);
+ goto free_dev;
+ }
+
+ sdev->lkey = sdev->pd->local_dma_lkey;
+
+ sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
+
+ srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
+
+ if (!srpt_service_guid)
+ srpt_service_guid = be64_to_cpu(device->node_guid);
+
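+ /*
+ * An IB/CM listener is only useful for InfiniBand link layers; for
+ * other link layers logins arrive through the RDMA/CM listener that
+ * is configured via the rdma_cm_port attribute.
+ */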
+ if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
+ sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+ if (IS_ERR(sdev->cm_id)) {
+ pr_info("ib_create_cm_id() failed: %ld\n",
+ PTR_ERR(sdev->cm_id));
+ ret = PTR_ERR(sdev->cm_id);
+ sdev->cm_id = NULL;
+ if (!rdma_cm_id)
+ goto err_ring;
+ }
+
+ /* print out target login information */
+ pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
+ srpt_service_guid, srpt_service_guid, srpt_service_guid);
+
+ /*
+ * We do not have a consistent service_id (i.e. also id_ext of
+ * target_id) to identify this target. We currently use the GUID of
+ * the first HCA in the system as service_id; therefore, the target_id
+ * will change if this HCA goes bad and is replaced by a different
+ * HCA.
+ */
+ ret = sdev->cm_id ?
+ ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid)) :
+ 0;
+ if (ret < 0) {
+ pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
+ sdev->cm_id->state);
+ goto err_cm;
+ }
+
+ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+ srpt_event_handler);
+ ib_register_event_handler(&sdev->event_handler);
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ INIT_LIST_HEAD(&sport->nexus_list);
+ mutex_init(&sport->mutex);
+ sport->sdev = sdev;
+ sport->port = i;
+ sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
+ sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
+ sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ sport->port_attrib.use_srq = false;
+ INIT_WORK(&sport->work, srpt_refresh_port_work);
+
+ ret = srpt_refresh_port(sport);
+ if (ret) {
+ pr_err("MAD registration failed for %s-%d.\n",
+ dev_name(&sdev->device->dev), i);
+ i--;
+ goto err_port;
+ }
+ }
+
+ spin_lock(&srpt_dev_lock);
+ list_add_tail(&sdev->list, &srpt_dev_list);
+ spin_unlock(&srpt_dev_lock);
+
+ ib_set_client_data(device, &srpt_client, sdev);
+ pr_debug("added %s.\n", dev_name(&device->dev));
+ return 0;
+
+err_port:
+ srpt_unregister_mad_agent(sdev, i);
+ ib_unregister_event_handler(&sdev->event_handler);
+err_cm:
+ if (sdev->cm_id)
+ ib_destroy_cm_id(sdev->cm_id);
+err_ring:
+ srpt_free_srq(sdev);
+ ib_dealloc_pd(sdev->pd);
+free_dev:
+ srpt_sdev_put(sdev);
+ pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
+ return ret;
+}
+
+/**
+ * srpt_remove_one - InfiniBand device removal callback function
+ * @device: Describes a HCA.
+ * @client_data: The value passed as the third argument to ib_set_client_data().
+ */
+static void srpt_remove_one(struct ib_device *device, void *client_data)
+{
+ struct srpt_device *sdev = client_data;
+ int i;
+
+ srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
+
+ ib_unregister_event_handler(&sdev->event_handler);
+
+ /* Cancel any work queued by the just unregistered IB event handler. */
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ cancel_work_sync(&sdev->port[i].work);
+
+ if (sdev->cm_id)
+ ib_destroy_cm_id(sdev->cm_id);
+
+ ib_set_client_data(device, &srpt_client, NULL);
+
+ /*
+ * Unregistering a target must happen after destroying sdev->cm_id
+ * such that no new SRP_LOGIN_REQ information units can arrive while
+ * destroying the target.
+ */
+ spin_lock(&srpt_dev_lock);
+ list_del(&sdev->list);
+ spin_unlock(&srpt_dev_lock);
+
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ srpt_release_sport(&sdev->port[i]);
+
+ srpt_free_srq(sdev);
+
+ ib_dealloc_pd(sdev->pd);
+
+ srpt_sdev_put(sdev);
+}
+
+static struct ib_client srpt_client = {
+ .name = DRV_NAME,
+ .add = srpt_add_one,
+ .remove = srpt_remove_one
+};
+
+static int srpt_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int srpt_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
+{
+ return tpg->se_tpg_wwn->priv;
+}
+
+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = wwn->priv;
+
+ if (sport->guid_id && &sport->guid_id->wwn == wwn)
+ return sport->guid_id;
+ if (sport->gid_id && &sport->gid_id->wwn == wwn)
+ return sport->gid_id;
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
+static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
+{
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+
+ return stpg->sport_id->name;
+}
+
+static u16 srpt_get_tag(struct se_portal_group *tpg)
+{
+ return 1;
+}
+
+static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void srpt_release_cmd(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
+
+ WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
+ !(ioctx->cmd.transport_state & CMD_T_ABORTED));
+
+ if (recv_ioctx) {
+ WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
+ ioctx->recv_ioctx = NULL;
+ srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
+ }
+
+ if (ioctx->n_rw_ctx) {
+ srpt_free_rw_ctxs(ch, ioctx);
+ ioctx->n_rw_ctx = 0;
+ }
+
+ target_free_tag(se_cmd->se_sess, se_cmd);
+}
+
+/**
+ * srpt_close_session - forcibly close a session
+ * @se_sess: SCSI target session.
+ *
+ * Callback function invoked by the TCM core to clean up sessions associated
+ * with a node ACL when the user invokes
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_close_session(struct se_session *se_sess)
+{
+ struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+
+ srpt_disconnect_ch_sync(ch);
+}
+
+/**
+ * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
+ * @se_sess: SCSI target session.
+ *
+ * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
+ * This object represents an arbitrary integer used to uniquely identify a
+ * particular attached remote initiator port to a particular SCSI target port
+ * within a particular SCSI target device within a particular SCSI instance.
+ */
+static u32 srpt_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+/* Note: only used from inside debug printk's by the TCM core. */
+static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return ioctx->state;
+}
+
+static int srpt_parse_guid(u64 *guid, const char *name)
+{
+ u16 w[4];
+ int ret = -EINVAL;
+
+ if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
+ goto out;
+ *guid = get_unaligned_be64(w);
+ ret = 0;
+out:
+ return ret;
+}
+
+/**
+ * srpt_parse_i_port_id - parse an initiator port ID
+ * @name: ASCII representation of a 128-bit initiator port ID.
+ * @i_port_id: Binary 128-bit port ID.
+ */
+static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+{
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+ int ret;
+
+ p = name;
+ if (strncasecmp(p, "0x", 2) == 0)
+ p += 2;
+ ret = -EINVAL;
+ len = strlen(p);
+ if (len % 2)
+ goto out;
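+ /* Pad short IDs with leading zero bytes to the full 16 bytes. */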
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(i_port_id, 0, leading_zero_bytes);
+ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+
+out:
+ return ret;
+}
+
+/*
+ * configfs callback function invoked for mkdir
+ * /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ *
+ * i_port_id must be an initiator port GUID, GID or IP address. See also the
+ * target_setup_session() calls in this driver. Examples of valid initiator
+ * port IDs:
+ * 0x0000000000000000505400fffe4a0b7b
+ * 0000000000000000505400fffe4a0b7b
+ * 5054:00ff:fe4a:0b7b
+ * 192.168.122.76
+ */
+static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
+{
+ struct sockaddr_storage sa;
+ u64 guid;
+ u8 i_port_id[16];
+ int ret;
+
+ ret = srpt_parse_guid(&guid, name);
+ if (ret < 0)
+ ret = srpt_parse_i_port_id(i_port_id, name);
+ if (ret < 0)
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
+ &sa);
+ if (ret < 0)
+ pr_err("invalid initiator port ID %s\n", name);
+ return ret;
+}
+
+static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+}
+
+static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RDMA_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
+ MAX_SRPT_RDMA_SIZE);
+ return -EINVAL;
+ }
+ if (val < DEFAULT_MAX_RDMA_SIZE) {
+ pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
+ val, DEFAULT_MAX_RDMA_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rdma_size = val;
+
+ return count;
+}
+
+static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+}
+
+static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RSP_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
+ MAX_SRPT_RSP_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_MAX_RSP_SIZE) {
+ pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
+ MIN_MAX_RSP_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rsp_size = val;
+
+ return count;
+}
+
+static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sysfs_emit(page, "%u\n", sport->port_attrib.srp_sq_size);
+}
+
+static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
+ MAX_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
+ MIN_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_sq_size = val;
+
+ return count;
+}
+
+static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sysfs_emit(page, "%d\n", sport->port_attrib.use_srq);
+}
+
+static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ struct srpt_device *sdev = sport->sdev;
+ unsigned long val;
+ bool enabled;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val != !!val)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&sdev->sdev_mutex);
+ if (ret < 0)
+ return ret;
+ ret = mutex_lock_interruptible(&sport->mutex);
+ if (ret < 0)
+ goto unlock_sdev;
+ enabled = sport->enabled;
+ /* Log out all initiator systems before changing 'use_srq'. */
+ srpt_set_enabled(sport, false);
+ sport->port_attrib.use_srq = val;
+ srpt_use_srq(sdev, sport->port_attrib.use_srq);
+ srpt_set_enabled(sport, enabled);
+ ret = count;
+ mutex_unlock(&sport->mutex);
+unlock_sdev:
+ mutex_unlock(&sdev->sdev_mutex);
+
+ return ret;
+}
+
+CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
+CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
+CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
+CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
+
+static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
+ &srpt_tpg_attrib_attr_srp_max_rdma_size,
+ &srpt_tpg_attrib_attr_srp_max_rsp_size,
+ &srpt_tpg_attrib_attr_srp_sq_size,
+ &srpt_tpg_attrib_attr_use_srq,
+ NULL,
+};
+
+static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
+{
+ struct rdma_cm_id *rdma_cm_id;
+ int ret;
+
+ rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
+ NULL, RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(rdma_cm_id)) {
+ pr_err("RDMA/CM ID creation failed: %ld\n",
+ PTR_ERR(rdma_cm_id));
+ goto out;
+ }
+
+ ret = rdma_bind_addr(rdma_cm_id, listen_addr);
+ if (ret) {
+ char addr_str[64];
+
+ snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
+ pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
+ addr_str, ret);
+ rdma_destroy_id(rdma_cm_id);
+ rdma_cm_id = ERR_PTR(ret);
+ goto out;
+ }
+
+ ret = rdma_listen(rdma_cm_id, 128);
+ if (ret) {
+ pr_err("rdma_listen() failed: %d\n", ret);
+ rdma_destroy_id(rdma_cm_id);
+ rdma_cm_id = ERR_PTR(ret);
+ }
+
+out:
+ return rdma_cm_id;
+}
+
+static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%d\n", rdma_cm_port);
+}
+
+static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct sockaddr_in addr4 = { .sin_family = AF_INET };
+ struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
+ struct rdma_cm_id *new_id = NULL;
+ u16 val;
+ int ret;
+
+ ret = kstrtou16(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ ret = count;
+ if (rdma_cm_port == val)
+ goto out;
+
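+ /*
+ * Try IPv6 first since an IPv6 listener typically also accepts
+ * IPv4-mapped connections; fall back to an IPv4-only listener if
+ * creating the IPv6 listener fails.
+ */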
+ if (val) {
+ addr6.sin6_port = cpu_to_be16(val);
+ new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
+ if (IS_ERR(new_id)) {
+ addr4.sin_port = cpu_to_be16(val);
+ new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
+ if (IS_ERR(new_id)) {
+ ret = PTR_ERR(new_id);
+ goto out;
+ }
+ }
+ }
+
+ mutex_lock(&rdma_cm_mutex);
+ rdma_cm_port = val;
+ swap(rdma_cm_id, new_id);
+ mutex_unlock(&rdma_cm_mutex);
+
+ if (new_id)
+ rdma_destroy_id(new_id);
+ ret = count;
+out:
+ return ret;
+}
+
+CONFIGFS_ATTR(srpt_, rdma_cm_port);
+
+static struct configfs_attribute *srpt_da_attrs[] = {
+ &srpt_attr_rdma_cm_port,
+ NULL,
+};
+
+static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable)
+{
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ mutex_lock(&sport->mutex);
+ srpt_set_enabled(sport, enable);
+ mutex_unlock(&sport->mutex);
+
+ return 0;
+}
+
+/**
+ * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @wwn: Corresponds to $driver/$port.
+ * @name: $tpg.
+ */
+static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
+ const char *name)
+{
+ struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+ struct srpt_tpg *stpg;
+ int res = -ENOMEM;
+
+ stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+ if (!stpg)
+ return ERR_PTR(res);
+ stpg->sport_id = sport_id;
+ res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+ if (res) {
+ kfree(stpg);
+ return ERR_PTR(res);
+ }
+
+ mutex_lock(&sport_id->mutex);
+ list_add_tail(&stpg->entry, &sport_id->tpg_list);
+ mutex_unlock(&sport_id->mutex);
+
+ return &stpg->tpg;
+}
+
+/**
+ * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @tpg: Target portal group to deregister.
+ */
+static void srpt_drop_tpg(struct se_portal_group *tpg)
+{
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+ struct srpt_port_id *sport_id = stpg->sport_id;
+ struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+
+ mutex_lock(&sport_id->mutex);
+ list_del(&stpg->entry);
+ mutex_unlock(&sport_id->mutex);
+
+ sport->enabled = false;
+ core_tpg_deregister(tpg);
+ kfree(stpg);
+}
+
+/**
+ * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
+ * @tf: Not used.
+ * @group: Not used.
+ * @name: $port.
+ */
+static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct port_and_port_id papi = srpt_lookup_port(name);
+ struct srpt_port *sport = papi.sport;
+ struct srpt_port_id *port_id;
+
+ if (!papi.port_id)
+ return ERR_PTR(-EINVAL);
+ if (*papi.port_id) {
+ /* Attempt to create a directory that already exists. */
+ WARN_ON_ONCE(true);
+ return &(*papi.port_id)->wwn;
+ }
+ port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
+ if (!port_id) {
+ srpt_sdev_put(sport->sdev);
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&port_id->mutex);
+ INIT_LIST_HEAD(&port_id->tpg_list);
+ port_id->wwn.priv = sport;
+ /* Copy the name this WWN was looked up by: a GUID or a GID. */
+ memcpy(port_id->name, papi.port_id == &sport->guid_id ?
+ sport->guid_name : sport->gid_name,
+ ARRAY_SIZE(port_id->name));
+
+ *papi.port_id = port_id;
+
+ return &port_id->wwn;
+}
+
+/**
+ * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
+ * @wwn: $port.
+ */
+static void srpt_drop_tport(struct se_wwn *wwn)
+{
+ struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
+ struct srpt_port *sport = wwn->priv;
+
+ if (sport->guid_id == port_id)
+ sport->guid_id = NULL;
+ else if (sport->gid_id == port_id)
+ sport->gid_id = NULL;
+ else
+ WARN_ON_ONCE(true);
+
+ srpt_sdev_put(sport->sdev);
+ kfree(port_id);
+}
+
+static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
+{
+ return sysfs_emit(buf, "\n");
+}
+
+CONFIGFS_ATTR_RO(srpt_wwn_, version);
+
+static struct configfs_attribute *srpt_wwn_attrs[] = {
+ &srpt_wwn_attr_version,
+ NULL,
+};
+
+static const struct target_core_fabric_ops srpt_template = {
+ .module = THIS_MODULE,
+ .fabric_name = "srpt",
+ .tpg_get_wwn = srpt_get_fabric_wwn,
+ .tpg_get_tag = srpt_get_tag,
+ .tpg_check_demo_mode = srpt_check_false,
+ .tpg_check_demo_mode_cache = srpt_check_true,
+ .tpg_check_demo_mode_write_protect = srpt_check_true,
+ .tpg_check_prod_mode_write_protect = srpt_check_false,
+ .tpg_get_inst_index = srpt_tpg_get_inst_index,
+ .release_cmd = srpt_release_cmd,
+ .check_stop_free = srpt_check_stop_free,
+ .close_session = srpt_close_session,
+ .sess_get_index = srpt_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = srpt_write_pending,
+ .set_default_node_attributes = srpt_set_default_node_attrs,
+ .get_cmd_state = srpt_get_tcm_cmd_state,
+ .queue_data_in = srpt_queue_data_in,
+ .queue_status = srpt_queue_status,
+ .queue_tm_rsp = srpt_queue_tm_rsp,
+ .aborted_task = srpt_aborted_task,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = srpt_make_tport,
+ .fabric_drop_wwn = srpt_drop_tport,
+ .fabric_make_tpg = srpt_make_tpg,
+ .fabric_enable_tpg = srpt_enable_tpg,
+ .fabric_drop_tpg = srpt_drop_tpg,
+ .fabric_init_nodeacl = srpt_init_nodeacl,
+
+ .tfc_discovery_attrs = srpt_da_attrs,
+ .tfc_wwn_attrs = srpt_wwn_attrs,
+ .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
+};
+
+/**
+ * srpt_init_module - kernel module initialization
+ *
+ * Note: Since ib_register_client() registers callback functions, and since at
+ * least one of these callback functions (srpt_add_one()) calls target core
+ * functions, this driver must be registered with the target core before
+ * ib_register_client() is called.
+ */
+static int __init srpt_init_module(void)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
+ pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
+ srp_max_req_size, MIN_MAX_REQ_SIZE);
+ goto out;
+ }
+
+ if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
+ || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
+ pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
+ srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
+ goto out;
+ }
+
+ ret = target_register_template(&srpt_template);
+ if (ret)
+ goto out;
+
+ ret = ib_register_client(&srpt_client);
+ if (ret) {
+ pr_err("couldn't register IB client\n");
+ goto out_unregister_target;
+ }
+
+ return 0;
+
+out_unregister_target:
+ target_unregister_template(&srpt_template);
+out:
+ return ret;
+}
+
+static void __exit srpt_cleanup_module(void)
+{
+ if (rdma_cm_id)
+ rdma_destroy_id(rdma_cm_id);
+ ib_unregister_client(&srpt_client);
+ target_unregister_template(&srpt_template);
+}
+
+module_init(srpt_init_module);
+module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 000000000..4c46b301e
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_SRPT_H
+#define IB_SRPT_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+
+#include <scsi/srp.h>
+
+#include "ib_dm_mad.h"
+
+/*
+ * The prefix the ServiceName field must start with in the device management
+ * ServiceEntries attribute pair. See also the SRP specification.
+ */
+#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+
+struct srpt_nexus;
+
+enum {
+ /*
+ * SRP IOControllerProfile attributes for SRP target ports that have
+ * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
+ * in the SRP specification.
+ */
+ SRP_PROTOCOL = 0x0108,
+ SRP_PROTOCOL_VERSION = 0x0001,
+ SRP_IO_SUBCLASS = 0x609e,
+ SRP_SEND_TO_IOC = 0x01,
+ SRP_SEND_FROM_IOC = 0x02,
+ SRP_RDMA_READ_FROM_IOC = 0x08,
+ SRP_RDMA_WRITE_FROM_IOC = 0x20,
+
+ /*
+ * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
+ * specification.
+ */
+ SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
+ SRP_LOSOLNT = 0x10, /* logout solicited notification */
+ SRP_CRSOLNT = 0x20, /* credit request solicited notification */
+ SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
+
+ /*
+ * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
+ * 18 and 20 in the SRP specification.
+ */
+ SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
+ SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
+
+ /*
+ * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
+ * 16 and 22 in the SRP specification.
+ */
+ SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
+
+ /* See also table 24 in the SRP specification. */
+ SRP_TSK_MGMT_SUCCESS = 0x00,
+ SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
+ SRP_TSK_MGMT_FAILED = 0x05,
+
+ /* See also table 21 in the SRP specification. */
+ SRP_CMD_SIMPLE_Q = 0x0,
+ SRP_CMD_HEAD_OF_Q = 0x1,
+ SRP_CMD_ORDERED_Q = 0x2,
+ SRP_CMD_ACA = 0x4,
+
+ SRPT_DEF_SG_TABLESIZE = 128,
+
+ MIN_SRPT_SQ_SIZE = 16,
+ DEF_SRPT_SQ_SIZE = 4096,
+ MAX_SRPT_RQ_SIZE = 128,
+ MIN_SRPT_SRQ_SIZE = 4,
+ DEFAULT_SRPT_SRQ_SIZE = 4095,
+ MAX_SRPT_SRQ_SIZE = 65535,
+ MAX_SRPT_RDMA_SIZE = 1U << 24,
+ MAX_SRPT_RSP_SIZE = 1024,
+
+ SRP_MAX_ADD_CDB_LEN = 16,
+ SRP_MAX_IMM_DATA_OFFSET = 80,
+ SRP_MAX_IMM_DATA = 8 * 1024,
+ MIN_MAX_REQ_SIZE = 996,
+ DEFAULT_MAX_REQ_SIZE_1 = sizeof(struct srp_cmd)/*48*/ +
+ SRP_MAX_ADD_CDB_LEN +
+ sizeof(struct srp_indirect_buf)/*20*/ +
+ 128 * sizeof(struct srp_direct_buf)/*16*/,
+ DEFAULT_MAX_REQ_SIZE_2 = SRP_MAX_IMM_DATA_OFFSET +
+ sizeof(struct srp_imm_buf) + SRP_MAX_IMM_DATA,
+ DEFAULT_MAX_REQ_SIZE = DEFAULT_MAX_REQ_SIZE_1 > DEFAULT_MAX_REQ_SIZE_2 ?
+ DEFAULT_MAX_REQ_SIZE_1 : DEFAULT_MAX_REQ_SIZE_2,
+
+ MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
+ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+ DEFAULT_MAX_RDMA_SIZE = 65536,
+};
+
+/**
+ * enum srpt_command_state - SCSI command state managed by SRPT
+ * @SRPT_STATE_NEW: New command arrived and is being processed.
+ * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
+ * for data arrival.
+ * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
+ * being processed.
+ * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
+ * @SRPT_STATE_MGMT: Processing a SCSI task management command.
+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
+ * @SRPT_STATE_DONE: Command processing finished successfully, command
+ * processing has been aborted or command processing
+ * failed.
+ */
+enum srpt_command_state {
+ SRPT_STATE_NEW = 0,
+ SRPT_STATE_NEED_DATA = 1,
+ SRPT_STATE_DATA_IN = 2,
+ SRPT_STATE_CMD_RSP_SENT = 3,
+ SRPT_STATE_MGMT = 4,
+ SRPT_STATE_MGMT_RSP_SENT = 5,
+ SRPT_STATE_DONE = 6,
+};
+
+/**
+ * struct srpt_ioctx - shared SRPT I/O context information
+ * @cqe: Completion queue element.
+ * @buf: Pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @offset: Offset of the first byte in @buf and @dma that is actually used.
+ * @index: Index of the I/O context in its ioctx_ring array.
+ */
+struct srpt_ioctx {
+ struct ib_cqe cqe;
+ void *buf;
+ dma_addr_t dma;
+ uint32_t offset;
+ uint32_t index;
+};
+
+/**
+ * struct srpt_recv_ioctx - SRPT receive I/O context
+ * @ioctx: See above.
+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ * @byte_len: Number of bytes in @ioctx.buf.
+ */
+struct srpt_recv_ioctx {
+ struct srpt_ioctx ioctx;
+ struct list_head wait_list;
+ int byte_len;
+};
+
+struct srpt_rw_ctx {
+ struct rdma_rw_ctx rw;
+ struct scatterlist *sg;
+ unsigned int nents;
+};
+
+/**
+ * struct srpt_send_ioctx - SRPT send I/O context
+ * @ioctx: See above.
+ * @ch: Channel pointer.
+ * @recv_ioctx: Receive I/O context associated with this send I/O context.
+ * Only used for processing immediate data.
+ * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
+ * @rw_ctxs: RDMA read/write contexts.
+ * @imm_sg: Scatterlist for immediate data.
+ * @rdma_cqe: RDMA completion queue element.
+ * @state: I/O context state.
+ * @cmd: Target core command data structure.
+ * @n_rdma: Number of work requests needed to transfer this ioctx.
+ * @n_rw_ctx: Size of rw_ctxs array.
+ * @queue_status_only: Send a SCSI status back to the initiator but no data.
+ * @sense_data: Sense data to be sent to the initiator.
+ */
+struct srpt_send_ioctx {
+ struct srpt_ioctx ioctx;
+ struct srpt_rdma_ch *ch;
+ struct srpt_recv_ioctx *recv_ioctx;
+
+ struct srpt_rw_ctx s_rw_ctx;
+ struct srpt_rw_ctx *rw_ctxs;
+
+ struct scatterlist imm_sg;
+
+ struct ib_cqe rdma_cqe;
+ enum srpt_command_state state;
+ struct se_cmd cmd;
+ u8 n_rdma;
+ u8 n_rw_ctx;
+ bool queue_status_only;
+ u8 sense_data[TRANSPORT_SENSE_BUFFER];
+};
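+
+/*
+ * Note for illustration: because @cmd is embedded rather than referenced,
+ * target core callbacks that receive a struct se_cmd pointer can recover
+ * the send I/O context with container_of(), e.g.:
+ *
+ *	struct srpt_send_ioctx *ioctx =
+ *		container_of(cmd, struct srpt_send_ioctx, cmd);
+ */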
+
+/**
+ * enum rdma_ch_state - SRP channel state
+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
+ * @CH_LIVE: QP is in RTS state.
+ * @CH_DISCONNECTING: Either a DREQ has been sent and a DREP is being
+ * awaited, or a DREQ has been received.
+ * @CH_DRAINING: A DREP has been received or waiting for a DREP timed
+ * out, and the last work request has been queued.
+ * @CH_DISCONNECTED: Last completion has been received.
+ */
+enum rdma_ch_state {
+ CH_CONNECTING,
+ CH_LIVE,
+ CH_DISCONNECTING,
+ CH_DRAINING,
+ CH_DISCONNECTED,
+};
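+
+/*
+ * Transition sketch (illustrative only, not part of the driver): the states
+ * above are strictly ordered, so a change of the state of the RDMA channel
+ * defined below can be expressed as "advance, but never move backwards".
+ * srpt_example_set_ch_state() is a hypothetical helper.
+ */
+#if 0	/* usage sketch only, not compiled */
+static bool srpt_example_set_ch_state(struct srpt_rdma_ch *ch,
+				      enum rdma_ch_state new_state)
+{
+	unsigned long flags;
+	bool changed = false;
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	/* Only ever move forward through the state machine. */
+	if (new_state > ch->state) {
+		ch->state = new_state;
+		changed = true;
+	}
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+
+	return changed;
+}
+#endif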
+
+/**
+ * struct srpt_rdma_ch - RDMA channel
+ * @nexus: I_T nexus this channel is associated with.
+ * @qp: IB queue pair used for communicating over this channel.
+ * @ib_cm: See below.
+ * @ib_cm.cm_id: IB CM ID associated with the channel.
+ * @rdma_cm: See below.
+ * @rdma_cm.cm_id: RDMA CM ID associated with the channel.
+ * @cq: IB completion queue for this channel.
+ * @cq_size: Number of CQEs in @cq.
+ * @zw_cqe: Zero-length write CQE.
+ * @rcu: RCU head.
+ * @kref: kref for this channel.
+ * @closed: Completion object that will be signaled as soon as a new
+ * channel object with the same identity can be created.
+ * @rq_size: IB receive queue size.
+ * @max_rsp_size: Maximum size of an SRP_RSP response message in bytes.
+ * @sq_wr_avail: Number of work requests available in the send queue.
+ * @sport: Pointer to the information of the HCA port used by this
+ * channel.
+ * @max_ti_iu_len: Maximum target-to-initiator information unit length.
+ * @req_lim: Request limit: maximum number of requests that may be sent
+ * by the initiator without having received a response.
+ * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @imm_data_offset: Offset from the start of SRP_CMD for immediate data.
+ * @spinlock: Protects the channel state.
+ * @state: Channel state. See also enum rdma_ch_state.
+ * @using_rdma_cm: True if the RDMA/CM rather than the IB/CM is used for
+ * this channel.
+ * @processing_wait_list: Whether or not cmd_wait_list is being processed.
+ * @rsp_buf_cache: kmem_cache for @ioctx_ring.
+ * @ioctx_ring: Send ring.
+ * @req_buf_cache: kmem_cache for @ioctx_recv_ring.
+ * @ioctx_recv_ring: Receive I/O context ring.
+ * @list: Node in srpt_nexus.ch_list.
+ * @cmd_wait_list: List of SCSI commands that arrived before the RTU event.
+ * This list contains struct srpt_recv_ioctx elements and is
+ * protected against concurrent modification by the cm_id spinlock.
+ * @pkey: P_Key of the IB partition for this SRP channel.
+ * @sess: Session information associated with this SRP channel.
+ * @sess_name: Session name.
+ * @release_work: Allows scheduling of srpt_release_channel().
+ */
+struct srpt_rdma_ch {
+ struct srpt_nexus *nexus;
+ struct ib_qp *qp;
+ union {
+ struct {
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ struct {
+ struct rdma_cm_id *cm_id;
+ } rdma_cm;
+ };
+ struct ib_cq *cq;
+ u32 cq_size;
+ struct ib_cqe zw_cqe;
+ struct rcu_head rcu;
+ struct kref kref;
+ struct completion *closed;
+ int rq_size;
+ u32 max_rsp_size;
+ atomic_t sq_wr_avail;
+ struct srpt_port *sport;
+ int max_ti_iu_len;
+ atomic_t req_lim;
+ atomic_t req_lim_delta;
+ u16 imm_data_offset;
+ spinlock_t spinlock;
+ enum rdma_ch_state state;
+ struct kmem_cache *rsp_buf_cache;
+ struct srpt_send_ioctx **ioctx_ring;
+ struct kmem_cache *req_buf_cache;
+ struct srpt_recv_ioctx **ioctx_recv_ring;
+ struct list_head list;
+ struct list_head cmd_wait_list;
+ uint16_t pkey;
+ bool using_rdma_cm;
+ bool processing_wait_list;
+ struct se_session *sess;
+ u8 sess_name[40];
+ struct work_struct release_work;
+};
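+
+/*
+ * Credit handling sketch (illustrative only, not part of the driver): the
+ * target consumes one @req_lim credit per incoming request and returns
+ * accumulated credits to the initiator via @req_lim_delta. The helpers
+ * below are hypothetical.
+ */
+#if 0	/* usage sketch only, not compiled */
+static bool srpt_example_consume_credit(struct srpt_rdma_ch *ch)
+{
+	if (atomic_dec_return(&ch->req_lim) < 0) {
+		/* The initiator exceeded its request limit; undo and fail. */
+		atomic_inc(&ch->req_lim);
+		return false;
+	}
+	return true;
+}
+
+static u32 srpt_example_take_credit_delta(struct srpt_rdma_ch *ch)
+{
+	/* Credits handed back to the initiator with the next SRP_RSP. */
+	return atomic_xchg(&ch->req_lim_delta, 0);
+}
+#endif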
+
+/**
+ * struct srpt_nexus - I_T nexus
+ * @rcu: RCU head for this data structure.
+ * @entry: srpt_port.nexus_list list node.
+ * @ch_list: struct srpt_rdma_ch list. Protected by srpt_port.mutex.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ */
+struct srpt_nexus {
+ struct rcu_head rcu;
+ struct list_head entry;
+ struct list_head ch_list;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+};
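+
+/*
+ * Lookup sketch (illustrative only, not part of the driver): a nexus is
+ * identified by its (initiator port, target port) ID pair, so finding an
+ * existing one under srpt_port.mutex (see struct srpt_port below) might
+ * look as follows. srpt_example_find_nexus() is a hypothetical helper.
+ */
+#if 0	/* usage sketch only, not compiled */
+static struct srpt_nexus *srpt_example_find_nexus(struct srpt_port *sport,
+						  const u8 i_port_id[16],
+						  const u8 t_port_id[16])
+{
+	struct srpt_nexus *nexus;
+
+	lockdep_assert_held(&sport->mutex);
+	list_for_each_entry(nexus, &sport->nexus_list, entry) {
+		if (memcmp(nexus->i_port_id, i_port_id, 16) == 0 &&
+		    memcmp(nexus->t_port_id, t_port_id, 16) == 0)
+			return nexus;
+	}
+
+	return NULL;
+}
+#endif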
+
+/**
+ * struct srpt_port_attrib - attributes for SRPT port
+ * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
+ * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
+ * @srp_sq_size: Per-channel send queue (SQ) size.
+ * @use_srq: Whether or not to use SRQ.
+ */
+struct srpt_port_attrib {
+ u32 srp_max_rdma_size;
+ u32 srp_max_rsp_size;
+ u32 srp_sq_size;
+ bool use_srq;
+};
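+
+/*
+ * Validation sketch (illustrative only, not part of the driver): one
+ * plausible policy for values written through configfs is to check them
+ * against the limits defined at the top of this file.
+ * srpt_example_clamp_attribs() is a hypothetical helper.
+ */
+#if 0	/* usage sketch only, not compiled */
+static void srpt_example_clamp_attribs(struct srpt_port_attrib *attrib)
+{
+	attrib->srp_sq_size = max_t(u32, attrib->srp_sq_size,
+				    MIN_SRPT_SQ_SIZE);
+	attrib->srp_max_rsp_size = clamp_t(u32, attrib->srp_max_rsp_size,
+					   MIN_MAX_RSP_SIZE,
+					   MAX_SRPT_RSP_SIZE);
+}
+#endif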
+
+/**
+ * struct srpt_tpg - information about a single "target portal group"
+ * @entry: Entry in @sport_id->tpg_list.
+ * @sport_id: Port name this TPG is associated with.
+ * @tpg: LIO TPG data structure.
+ *
+ * Zero or more target portal groups are associated with each port name
+ * (srpt_port_id). An ACL list is associated with each TPG.
+ */
+struct srpt_tpg {
+ struct list_head entry;
+ struct srpt_port_id *sport_id;
+ struct se_portal_group tpg;
+};
+
+/**
+ * struct srpt_port_id - LIO RDMA port information
+ * @mutex: Protects @tpg_list changes.
+ * @tpg_list: TPGs associated with the RDMA port name.
+ * @wwn: WWN associated with the RDMA port name.
+ * @name: ASCII representation of the port name.
+ *
+ * Multiple sysfs directories can be associated with a single RDMA port. This
+ * data structure represents a single (port, name) pair.
+ */
+struct srpt_port_id {
+ struct mutex mutex;
+ struct list_head tpg_list;
+ struct se_wwn wwn;
+ char name[64];
+};
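+
+/*
+ * Note for illustration: configfs callbacks that receive a struct se_wwn
+ * pointer can recover the enclosing port ID with container_of(), e.g.:
+ *
+ *	struct srpt_port_id *sport_id =
+ *		container_of(wwn, struct srpt_port_id, wwn);
+ */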
+
+/**
+ * struct srpt_port - SRPT RDMA port information
+ * @sdev: Backpointer to the HCA information.
+ * @mad_agent: Per-port management datagram processing information.
+ * @enabled: Whether or not this target port is enabled.
+ * @port: One-based port number.
+ * @sm_lid: Cached value of the port's sm_lid.
+ * @lid: Cached value of the port's lid.
+ * @gid: Cached value of the port's gid.
+ * @work: Work structure for refreshing the aforementioned cached values.
+ * @guid_name: port name in GUID format.
+ * @guid_id: LIO target port information for the port name in GUID format.
+ * @gid_name: port name in GID format.
+ * @gid_id: LIO target port information for the port name in GID format.
+ * @port_attrib: Port attributes that can be accessed through configfs.
+ * @refcount: Number of objects associated with this port.
+ * @freed_channels: Completion that will be signaled once @refcount becomes 0.
+ * @mutex: Protects nexus_list.
+ * @nexus_list: Nexus list. See also srpt_nexus.entry.
+ */
+struct srpt_port {
+ struct srpt_device *sdev;
+ struct ib_mad_agent *mad_agent;
+ bool enabled;
+ u8 port;
+ u32 sm_lid;
+ u32 lid;
+ union ib_gid gid;
+ struct work_struct work;
+ char guid_name[64];
+ struct srpt_port_id *guid_id;
+ char gid_name[64];
+ struct srpt_port_id *gid_id;
+ struct srpt_port_attrib port_attrib;
+ atomic_t refcount;
+ struct completion *freed_channels;
+ struct mutex mutex;
+ struct list_head nexus_list;
+};
+
+/**
+ * struct srpt_device - information associated by SRPT with a single HCA
+ * @refcnt: Reference count for this device.
+ * @device: Backpointer to the struct ib_device managed by the IB core.
+ * @pd: IB protection domain.
+ * @lkey: L_Key (local key) with write access to all local memory.
+ * @srq: Per-HCA SRQ (shared receive queue).
+ * @cm_id: Connection identifier.
+ * @srq_size: SRQ size.
+ * @sdev_mutex: Serializes use_srq changes.
+ * @use_srq: Whether or not to use SRQ.
+ * @req_buf_cache: kmem_cache for @ioctx_ring buffers.
+ * @ioctx_ring: Receive I/O contexts posted on the per-HCA SRQ.
+ * @event_handler: Per-HCA asynchronous IB event handler.
+ * @list: Node in srpt_dev_list.
+ * @port: Information about the ports owned by this HCA.
+ */
+struct srpt_device {
+ struct kref refcnt;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ u32 lkey;
+ struct ib_srq *srq;
+ struct ib_cm_id *cm_id;
+ int srq_size;
+ struct mutex sdev_mutex;
+ bool use_srq;
+ struct kmem_cache *req_buf_cache;
+ struct srpt_recv_ioctx **ioctx_ring;
+ struct ib_event_handler event_handler;
+ struct list_head list;
+ struct srpt_port port[];
+};
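+
+/*
+ * Setup sketch (illustrative only, not part of the driver): when @use_srq
+ * is true, a single shared receive queue serves all channels of the HCA.
+ * Creating it might look roughly as follows; srpt_example_create_srq() is
+ * a hypothetical helper.
+ */
+#if 0	/* usage sketch only, not compiled */
+static int srpt_example_create_srq(struct srpt_device *sdev)
+{
+	struct ib_srq_init_attr attr = {
+		.srq_context	= sdev,
+		.attr.max_wr	= sdev->srq_size,
+		.attr.max_sge	= 1,
+	};
+	struct ib_srq *srq;
+
+	srq = ib_create_srq(sdev->pd, &attr);
+	if (IS_ERR(srq))
+		return PTR_ERR(srq);
+	sdev->srq = srq;
+
+	return 0;
+}
+#endif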
+
+#endif /* IB_SRPT_H */