42 files changed, 28589 insertions, 0 deletions
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile new file mode 100644 index 000000000..437813c7b --- /dev/null +++ b/drivers/infiniband/ulp/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/ +obj-$(CONFIG_INFINIBAND_SRP) += srp/ +obj-$(CONFIG_INFINIBAND_SRPT) += srpt/ +obj-$(CONFIG_INFINIBAND_ISER) += iser/ +obj-$(CONFIG_INFINIBAND_ISERT) += isert/ +obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/ diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig new file mode 100644 index 000000000..cda8eac55 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/Kconfig @@ -0,0 +1,49 @@ +config INFINIBAND_IPOIB + tristate "IP-over-InfiniBand" + depends on NETDEVICES && INET && (IPV6 || IPV6=n) + ---help--- + Support for the IP-over-InfiniBand protocol (IPoIB). This + transports IP packets over InfiniBand so you can use your IB + device as a fancy NIC. + + See Documentation/infiniband/ipoib.txt for more information + +config INFINIBAND_IPOIB_CM + bool "IP-over-InfiniBand Connected Mode support" + depends on INFINIBAND_IPOIB + default n + ---help--- + This option enables support for IPoIB connected mode. After + enabling this option, you need to switch to connected mode + through /sys/class/net/ibXXX/mode to actually create + connections, and then increase the interface MTU with + e.g. ifconfig ib0 mtu 65520. + + WARNING: Enabling connected mode will trigger some packet + drops for multicast and UD mode traffic from this interface, + unless you limit mtu for these destinations to 2044. + +config INFINIBAND_IPOIB_DEBUG + bool "IP-over-InfiniBand debugging" if EXPERT + depends on INFINIBAND_IPOIB + default y + ---help--- + This option causes debugging code to be compiled into the + IPoIB driver. The output can be turned on via the + debug_level and mcast_debug_level module parameters (which + can also be set after the driver is loaded through sysfs). + + This option also creates a directory tree under ipoib/ in + debugfs, which contains files that expose debugging + information about IB multicast groups used by the IPoIB + driver. + +config INFINIBAND_IPOIB_DEBUG_DATA + bool "IP-over-InfiniBand data path debugging" + depends on INFINIBAND_IPOIB_DEBUG + ---help--- + This option compiles debugging code into the data path + of the IPoIB driver. The output can be turned on via the + data_debug_level module parameter; however, even with output + turned off, this debugging code will have some performance + impact. diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile new file mode 100644 index 000000000..6ece857ed --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o + +ib_ipoib-y := ipoib_main.o \ + ipoib_ib.o \ + ipoib_multicast.o \ + ipoib_verbs.o \ + ipoib_vlan.o \ + ipoib_ethtool.o \ + ipoib_netlink.o +ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o +ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o + diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h new file mode 100644 index 000000000..ef1222101 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -0,0 +1,843 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _IPOIB_H +#define _IPOIB_H + +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <linux/kref.h> +#include <linux/if_infiniband.h> +#include <linux/mutex.h> + +#include <net/neighbour.h> +#include <net/sch_generic.h> + +#include <linux/atomic.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_pack.h> +#include <rdma/ib_sa.h> +#include <linux/sched.h> +/* constants */ + +enum ipoib_flush_level { + IPOIB_FLUSH_LIGHT, + IPOIB_FLUSH_NORMAL, + IPOIB_FLUSH_HEAVY +}; + +enum { + IPOIB_ENCAP_LEN = 4, + IPOIB_PSEUDO_LEN = 20, + IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN, + + IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, + IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ + + IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ + IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, + IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, + IPOIB_CM_RX_SG = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE, + IPOIB_RX_RING_SIZE = 256, + IPOIB_TX_RING_SIZE = 128, + IPOIB_MAX_QUEUE_SIZE = 8192, + IPOIB_MIN_QUEUE_SIZE = 2, + IPOIB_CM_MAX_CONN_QP = 4096, + + IPOIB_NUM_WC = 4, + + IPOIB_MAX_PATH_REC_QUEUE = 3, + IPOIB_MAX_MCAST_QUEUE = 64, + + IPOIB_FLAG_OPER_UP = 0, + IPOIB_FLAG_INITIALIZED = 1, + IPOIB_FLAG_ADMIN_UP = 2, + IPOIB_PKEY_ASSIGNED = 3, + IPOIB_FLAG_SUBINTERFACE = 5, + IPOIB_STOP_REAPER = 7, + IPOIB_FLAG_ADMIN_CM = 9, + IPOIB_FLAG_UMCAST = 10, + IPOIB_NEIGH_TBL_FLUSH = 12, + IPOIB_FLAG_DEV_ADDR_SET = 13, + IPOIB_FLAG_DEV_ADDR_CTRL = 14, + + IPOIB_MAX_BACKOFF_SECONDS = 16, + + IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ + IPOIB_MCAST_FLAG_SENDONLY = 1, + /* + * For IPOIB_MCAST_FLAG_BUSY + * When set, in flight join and mcast->mc is unreliable + * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or + * haven't started yet + * When clear and mcast->mc is valid pointer, join was successful + */ + IPOIB_MCAST_FLAG_BUSY = 2, + IPOIB_MCAST_FLAG_ATTACHED = 3, + + MAX_SEND_CQE = 64, + IPOIB_CM_COPYBREAK = 256, + + IPOIB_NON_CHILD = 0, + IPOIB_LEGACY_CHILD = 1, + IPOIB_RTNL_CHILD = 2, +}; + +#define IPOIB_OP_RECV (1ul << 31) +#ifdef CONFIG_INFINIBAND_IPOIB_CM 
+#define IPOIB_OP_CM (1ul << 30) +#else +#define IPOIB_OP_CM (0) +#endif + +#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF)) + +/* structs */ + +struct ipoib_header { + __be16 proto; + u16 reserved; +}; + +struct ipoib_pseudo_header { + u8 hwaddr[INFINIBAND_ALEN]; +}; + +static inline void skb_add_pseudo_hdr(struct sk_buff *skb) +{ + char *data = skb_push(skb, IPOIB_PSEUDO_LEN); + + /* + * only the ipoib header is present now, make room for a dummy + * pseudo header and set skb field accordingly + */ + memset(data, 0, IPOIB_PSEUDO_LEN); + skb_reset_mac_header(skb); + skb_pull(skb, IPOIB_HARD_LEN); +} + +static inline struct ipoib_dev_priv *ipoib_priv(const struct net_device *dev) +{ + struct rdma_netdev *rn = netdev_priv(dev); + + return rn->clnt_priv; +} + +/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ +struct ipoib_mcast { + struct ib_sa_mcmember_rec mcmember; + struct ib_sa_multicast *mc; + struct ipoib_ah *ah; + + struct rb_node rb_node; + struct list_head list; + + unsigned long created; + unsigned long backoff; + unsigned long delay_until; + + unsigned long flags; + unsigned char logcount; + + struct list_head neigh_list; + + struct sk_buff_head pkt_queue; + + struct net_device *dev; + struct completion done; +}; + +struct ipoib_rx_buf { + struct sk_buff *skb; + u64 mapping[IPOIB_UD_RX_SG]; +}; + +struct ipoib_tx_buf { + struct sk_buff *skb; + u64 mapping[MAX_SKB_FRAGS + 1]; +}; + +struct ib_cm_id; + +struct ipoib_cm_data { + __be32 qpn; /* High byte MUST be ignored on receive */ + __be32 mtu; +}; + +/* + * Quoting 10.3.1 Queue Pair and EE Context States: + * + * Note, for QPs that are associated with an SRQ, the Consumer should take the + * QP through the Error State before invoking a Destroy QP or a Modify QP to the + * Reset State. The Consumer may invoke the Destroy QP without first performing + * a Modify QP to the Error State and waiting for the Affiliated Asynchronous + * Last WQE Reached Event. However, if the Consumer does not wait for the + * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment + * leakage may occur. Therefore, it is good programming practice to tear down a + * QP that is associated with an SRQ by using the following process: + * + * - Put the QP in the Error State + * - Wait for the Affiliated Asynchronous Last WQE Reached Event; + * - either: + * drain the CQ by invoking the Poll CQ verb and either wait for CQ + * to be empty or the number of Poll CQ operations has exceeded + * CQ capacity size; + * - or + * post another WR that completes on the same CQ and wait for this + * WR to return as a WC; + * - and then invoke a Destroy QP or Reset QP. + * + * We use the second option and wait for a completion on the + * same CQ before destroying QPs attached to our SRQ. 
+ */ + +enum ipoib_cm_state { + IPOIB_CM_RX_LIVE, + IPOIB_CM_RX_ERROR, /* Ignored by stale task */ + IPOIB_CM_RX_FLUSH /* Last WQE Reached event observed */ +}; + +struct ipoib_cm_rx { + struct ib_cm_id *id; + struct ib_qp *qp; + struct ipoib_cm_rx_buf *rx_ring; + struct list_head list; + struct net_device *dev; + unsigned long jiffies; + enum ipoib_cm_state state; + int recv_count; +}; + +struct ipoib_cm_tx { + struct ib_cm_id *id; + struct ib_qp *qp; + struct list_head list; + struct net_device *dev; + struct ipoib_neigh *neigh; + struct ipoib_tx_buf *tx_ring; + unsigned int tx_head; + unsigned int tx_tail; + unsigned long flags; + u32 mtu; + unsigned int max_send_sge; +}; + +struct ipoib_cm_rx_buf { + struct sk_buff *skb; + u64 mapping[IPOIB_CM_RX_SG]; +}; + +struct ipoib_cm_dev_priv { + struct ib_srq *srq; + struct ipoib_cm_rx_buf *srq_ring; + struct ib_cm_id *id; + struct list_head passive_ids; /* state: LIVE */ + struct list_head rx_error_list; /* state: ERROR */ + struct list_head rx_flush_list; /* state: FLUSH, drain not started */ + struct list_head rx_drain_list; /* state: FLUSH, drain started */ + struct list_head rx_reap_list; /* state: FLUSH, drain done */ + struct work_struct start_task; + struct work_struct reap_task; + struct work_struct skb_task; + struct work_struct rx_reap_task; + struct delayed_work stale_task; + struct sk_buff_head skb_queue; + struct list_head start_list; + struct list_head reap_list; + struct ib_wc ibwc[IPOIB_NUM_WC]; + struct ib_sge rx_sge[IPOIB_CM_RX_SG]; + struct ib_recv_wr rx_wr; + int nonsrq_conn_qp; + int max_cm_mtu; + int num_frags; +}; + +struct ipoib_ethtool_st { + u16 coalesce_usecs; + u16 max_coalesced_frames; +}; + +struct ipoib_neigh_table; + +struct ipoib_neigh_hash { + struct ipoib_neigh_table *ntbl; + struct ipoib_neigh __rcu **buckets; + struct rcu_head rcu; + u32 mask; + u32 size; +}; + +struct ipoib_neigh_table { + struct ipoib_neigh_hash __rcu *htbl; + atomic_t entries; + struct completion flushed; + struct completion deleted; +}; + +struct ipoib_qp_state_validate { + struct work_struct work; + struct ipoib_dev_priv *priv; +}; + +/* + * Device private locking: network stack tx_lock protects members used + * in TX fast path, lock protects everything else. lock nests inside + * of tx_lock (ie tx_lock must be acquired first if needed). + */ +struct ipoib_dev_priv { + spinlock_t lock; + + struct net_device *dev; + void (*next_priv_destructor)(struct net_device *dev); + + struct napi_struct send_napi; + struct napi_struct recv_napi; + + unsigned long flags; + + /* + * This protects access to the child_intfs list. + * To READ from child_intfs the RTNL or vlan_rwsem read side must be + * held. To WRITE RTNL and the vlan_rwsem write side must be held (in + * that order) This lock exists because we have a few contexts where + * we need the child_intfs, but do not want to grab the RTNL. 
+ */ + struct rw_semaphore vlan_rwsem; + struct mutex mcast_mutex; + + struct rb_root path_tree; + struct list_head path_list; + + struct ipoib_neigh_table ntbl; + + struct ipoib_mcast *broadcast; + struct list_head multicast_list; + struct rb_root multicast_tree; + + struct workqueue_struct *wq; + struct delayed_work mcast_task; + struct work_struct carrier_on_task; + struct work_struct flush_light; + struct work_struct flush_normal; + struct work_struct flush_heavy; + struct work_struct restart_task; + struct delayed_work ah_reap_task; + struct delayed_work neigh_reap_task; + struct ib_device *ca; + u8 port; + u16 pkey; + u16 pkey_index; + struct ib_pd *pd; + struct ib_cq *recv_cq; + struct ib_cq *send_cq; + struct ib_qp *qp; + u32 qkey; + + union ib_gid local_gid; + u32 local_lid; + + unsigned int admin_mtu; + unsigned int mcast_mtu; + unsigned int max_ib_mtu; + + struct ipoib_rx_buf *rx_ring; + + struct ipoib_tx_buf *tx_ring; + /* cyclic ring variables for managing tx_ring, for UD only */ + unsigned int tx_head; + unsigned int tx_tail; + /* cyclic ring variables for counting overall outstanding send WRs */ + unsigned int global_tx_head; + unsigned int global_tx_tail; + struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; + struct ib_ud_wr tx_wr; + struct ib_wc send_wc[MAX_SEND_CQE]; + + struct ib_recv_wr rx_wr; + struct ib_sge rx_sge[IPOIB_UD_RX_SG]; + + struct ib_wc ibwc[IPOIB_NUM_WC]; + + struct list_head dead_ahs; + + struct ib_event_handler event_handler; + + struct net_device *parent; + struct list_head child_intfs; + struct list_head list; + int child_type; + +#ifdef CONFIG_INFINIBAND_IPOIB_CM + struct ipoib_cm_dev_priv cm; +#endif + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG + struct list_head fs_list; + struct dentry *mcg_dentry; + struct dentry *path_dentry; +#endif + u64 hca_caps; + struct ipoib_ethtool_st ethtool; + unsigned int max_send_sge; + bool sm_fullmember_sendonly_support; + const struct net_device_ops *rn_ops; +}; + +struct ipoib_ah { + struct net_device *dev; + struct ib_ah *ah; + struct list_head list; + struct kref ref; + unsigned int last_send; + int valid; +}; + +struct ipoib_path { + struct net_device *dev; + struct sa_path_rec pathrec; + struct ipoib_ah *ah; + struct sk_buff_head queue; + + struct list_head neigh_list; + + int query_id; + struct ib_sa_query *query; + struct completion done; + + struct rb_node rb_node; + struct list_head list; +}; + +struct ipoib_neigh { + struct ipoib_ah *ah; +#ifdef CONFIG_INFINIBAND_IPOIB_CM + struct ipoib_cm_tx *cm; +#endif + u8 daddr[INFINIBAND_ALEN]; + struct sk_buff_head queue; + + struct net_device *dev; + + struct list_head list; + struct ipoib_neigh __rcu *hnext; + struct rcu_head rcu; + atomic_t refcnt; + unsigned long alive; +}; + +#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN) +#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES) + +void ipoib_neigh_dtor(struct ipoib_neigh *neigh); +static inline void ipoib_neigh_put(struct ipoib_neigh *neigh) +{ + if (atomic_dec_and_test(&neigh->refcnt)) + ipoib_neigh_dtor(neigh); +} +struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr); +struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, + struct net_device *dev); +void ipoib_neigh_free(struct ipoib_neigh *neigh); +void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid); + +extern struct workqueue_struct *ipoib_workqueue; + +/* functions */ + +int ipoib_rx_poll(struct napi_struct *napi, int budget); +int ipoib_tx_poll(struct napi_struct *napi, int budget); +void ipoib_ib_rx_completion(struct ib_cq *cq, void 
*ctx_ptr); +void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr); + +struct ipoib_ah *ipoib_create_ah(struct net_device *dev, + struct ib_pd *pd, struct rdma_ah_attr *attr); +void ipoib_free_ah(struct kref *kref); +static inline void ipoib_put_ah(struct ipoib_ah *ah) +{ + kref_put(&ah->ref, ipoib_free_ah); +} +int ipoib_open(struct net_device *dev); +void ipoib_intf_free(struct net_device *dev); +int ipoib_add_pkey_attr(struct net_device *dev); +int ipoib_add_umcast_attr(struct net_device *dev); + +int ipoib_send(struct net_device *dev, struct sk_buff *skb, + struct ib_ah *address, u32 dqpn); +void ipoib_reap_ah(struct work_struct *work); + +struct ipoib_path *__path_find(struct net_device *dev, void *gid); +void ipoib_mark_paths_invalid(struct net_device *dev); +void ipoib_flush_paths(struct net_device *dev); +struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port, + const char *format); +void ipoib_ib_tx_timer_func(struct timer_list *t); +void ipoib_ib_dev_flush_light(struct work_struct *work); +void ipoib_ib_dev_flush_normal(struct work_struct *work); +void ipoib_ib_dev_flush_heavy(struct work_struct *work); +void ipoib_pkey_event(struct work_struct *work); +void ipoib_ib_dev_cleanup(struct net_device *dev); + +int ipoib_ib_dev_open_default(struct net_device *dev); +int ipoib_ib_dev_open(struct net_device *dev); +void ipoib_ib_dev_stop(struct net_device *dev); +void ipoib_ib_dev_up(struct net_device *dev); +void ipoib_ib_dev_down(struct net_device *dev); +int ipoib_ib_dev_stop_default(struct net_device *dev); +void ipoib_pkey_dev_check_presence(struct net_device *dev); + +void ipoib_mcast_join_task(struct work_struct *work); +void ipoib_mcast_carrier_on_task(struct work_struct *work); +void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); + +void ipoib_mcast_restart_task(struct work_struct *work); +void ipoib_mcast_start_thread(struct net_device *dev); +int ipoib_mcast_stop_thread(struct net_device *dev); + +void ipoib_mcast_dev_down(struct net_device *dev); +void ipoib_mcast_dev_flush(struct net_device *dev); + +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req); +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req); + +static inline void ipoib_build_sge(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) +{ + int i, off; + struct sk_buff *skb = tx_req->skb; + skb_frag_t *frags = skb_shinfo(skb)->frags; + int nr_frags = skb_shinfo(skb)->nr_frags; + u64 *mapping = tx_req->mapping; + + if (skb_headlen(skb)) { + priv->tx_sge[0].addr = mapping[0]; + priv->tx_sge[0].length = skb_headlen(skb); + off = 1; + } else + off = 0; + + for (i = 0; i < nr_frags; ++i) { + priv->tx_sge[i + off].addr = mapping[i + off]; + priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); + } + priv->tx_wr.wr.num_sge = nr_frags + off; +} + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); +int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); +void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, + union ib_gid *gid, + unsigned long *created, + unsigned int *queuelen, + unsigned int *complete, + unsigned int *send_only); + +struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev); +int ipoib_path_iter_next(struct ipoib_path_iter *iter); +void ipoib_path_iter_read(struct ipoib_path_iter *iter, + struct ipoib_path *path); +#endif + +int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca, + union ib_gid *mgid, u16 
mlid, int set_qkey, u32 qkey); +int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca, + union ib_gid *mgid, u16 mlid); +void ipoib_mcast_remove_list(struct list_head *remove_list); +void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid, + struct list_head *remove_list); + +int ipoib_init_qp(struct net_device *dev); +int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); +void ipoib_transport_dev_cleanup(struct net_device *dev); + +void ipoib_event(struct ib_event_handler *handler, + struct ib_event *record); + +int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); +int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); + +int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, + u16 pkey, int child_type); + +int __init ipoib_netlink_init(void); +void __exit ipoib_netlink_fini(void); + +void ipoib_set_umcast(struct net_device *ndev, int umcast_val); +int ipoib_set_mode(struct net_device *dev, const char *buf); + +void ipoib_setup_common(struct net_device *dev); + +void ipoib_pkey_open(struct ipoib_dev_priv *priv); +void ipoib_drain_cq(struct net_device *dev); + +void ipoib_set_ethtool_ops(struct net_device *dev); + +#define IPOIB_FLAGS_RC 0x80 +#define IPOIB_FLAGS_UC 0x40 + +/* We don't support UC connections at the moment */ +#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC)) + +#ifdef CONFIG_INFINIBAND_IPOIB_CM + +extern int ipoib_max_conn_qp; + +static inline int ipoib_cm_admin_enabled(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + return IPOIB_CM_SUPPORTED(dev->dev_addr) && + test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); +} + +static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + return IPOIB_CM_SUPPORTED(hwaddr) && + test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); +} + +static inline int ipoib_cm_up(struct ipoib_neigh *neigh) + +{ + return test_bit(IPOIB_FLAG_OPER_UP, &neigh->cm->flags); +} + +static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh) +{ + return neigh->cm; +} + +static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx) +{ + neigh->cm = tx; +} + +static inline int ipoib_cm_has_srq(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + return !!priv->cm.srq; +} + +static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + return priv->cm.max_cm_mtu; +} + +void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx); +int ipoib_cm_dev_open(struct net_device *dev); +void ipoib_cm_dev_stop(struct net_device *dev); +int ipoib_cm_dev_init(struct net_device *dev); +int ipoib_cm_add_mode_attr(struct net_device *dev); +void ipoib_cm_dev_cleanup(struct net_device *dev); +struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path, + struct ipoib_neigh *neigh); +void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx); +void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, + unsigned int mtu); +void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc); +void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc); +#else + +struct ipoib_cm_tx; + +#define ipoib_max_conn_qp 0 + +static inline int ipoib_cm_admin_enabled(struct net_device *dev) +{ + return 0; +} +static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr) + +{ + return 0; +} + +static 
inline int ipoib_cm_up(struct ipoib_neigh *neigh) + +{ + return 0; +} + +static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh) +{ + return NULL; +} + +static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx) +{ +} + +static inline int ipoib_cm_has_srq(struct net_device *dev) +{ + return 0; +} + +static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev) +{ + return 0; +} + +static inline +void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) +{ + return; +} + +static inline +int ipoib_cm_dev_open(struct net_device *dev) +{ + return 0; +} + +static inline +void ipoib_cm_dev_stop(struct net_device *dev) +{ + return; +} + +static inline +int ipoib_cm_dev_init(struct net_device *dev) +{ + return -EOPNOTSUPP; +} + +static inline +void ipoib_cm_dev_cleanup(struct net_device *dev) +{ + return; +} + +static inline +struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path, + struct ipoib_neigh *neigh) +{ + return NULL; +} + +static inline +void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) +{ + return; +} + +static inline +int ipoib_cm_add_mode_attr(struct net_device *dev) +{ + return 0; +} + +static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, + unsigned int mtu) +{ + dev_kfree_skb_any(skb); +} + +static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) +{ +} + +static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) +{ +} +#endif + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +void ipoib_create_debug_files(struct net_device *dev); +void ipoib_delete_debug_files(struct net_device *dev); +int ipoib_register_debugfs(void); +void ipoib_unregister_debugfs(void); +#else +static inline void ipoib_create_debug_files(struct net_device *dev) { } +static inline void ipoib_delete_debug_files(struct net_device *dev) { } +static inline int ipoib_register_debugfs(void) { return 0; } +static inline void ipoib_unregister_debugfs(void) { } +#endif + +#define ipoib_printk(level, priv, format, arg...) \ + printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg) +#define ipoib_warn(priv, format, arg...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + 10 * HZ /*10 seconds */, \ + 100); \ + if (__ratelimit(&_rs)) \ + ipoib_printk(KERN_WARNING, priv, format , ## arg);\ +} while (0) + +extern int ipoib_sendq_size; +extern int ipoib_recvq_size; + +extern struct ib_sa_client ipoib_sa_client; + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +extern int ipoib_debug_level; + +#define ipoib_dbg(priv, format, arg...) \ + do { \ + if (ipoib_debug_level > 0) \ + ipoib_printk(KERN_DEBUG, priv, format , ## arg); \ + } while (0) +#define ipoib_dbg_mcast(priv, format, arg...) \ + do { \ + if (mcast_debug_level > 0) \ + ipoib_printk(KERN_DEBUG, priv, format , ## arg); \ + } while (0) +#else /* CONFIG_INFINIBAND_IPOIB_DEBUG */ +#define ipoib_dbg(priv, format, arg...) \ + do { (void) (priv); } while (0) +#define ipoib_dbg_mcast(priv, format, arg...) \ + do { (void) (priv); } while (0) +#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA +#define ipoib_dbg_data(priv, format, arg...) \ + do { \ + if (data_debug_level > 0) \ + ipoib_printk(KERN_DEBUG, priv, format , ## arg); \ + } while (0) +#else /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */ +#define ipoib_dbg_data(priv, format, arg...) 
\ + do { (void) (priv); } while (0) +#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */ + +#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff) + +extern const char ipoib_driver_version[]; + +#endif /* _IPOIB_H */ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c new file mode 100644 index 000000000..196f1e6b5 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -0,0 +1,1668 @@ +/* + * Copyright (c) 2006 Mellanox Technologies. All rights reserved + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <rdma/ib_cm.h> +#include <net/dst.h> +#include <net/icmp.h> +#include <linux/icmpv6.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/moduleparam.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> + +#include "ipoib.h" + +int ipoib_max_conn_qp = 128; + +module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444); +MODULE_PARM_DESC(max_nonsrq_conn_qp, + "Max number of connected-mode QPs per interface " + "(applied only if shared receive queue is not available)"); + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA +static int data_debug_level; + +module_param_named(cm_data_debug_level, data_debug_level, int, 0644); +MODULE_PARM_DESC(cm_data_debug_level, + "Enable data path debug tracing for connected mode if > 0"); +#endif + +#define IPOIB_CM_IETF_ID 0x1000000000000000ULL + +#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ) +#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ) +#define IPOIB_CM_RX_DELAY (3 * 256 * HZ) +#define IPOIB_CM_RX_UPDATE_MASK (0x3) + +#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN) + +static struct ib_qp_attr ipoib_cm_err_attr = { + .qp_state = IB_QPS_ERR +}; + +#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff + +static struct ib_send_wr ipoib_cm_rx_drain_wr = { + .opcode = IB_WR_SEND, +}; + +static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event); + +static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, + u64 mapping[IPOIB_CM_RX_SG]) +{ + int i; + + ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); + + for (i = 0; i < frags; ++i) + ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); +} + +static int ipoib_cm_post_receive_srq(struct net_device *dev, int id) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int i, ret; + + priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; + + for (i = 0; i < priv->cm.num_frags; ++i) + priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; + + ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL); + if (unlikely(ret)) { + ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); + ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1, + priv->cm.srq_ring[id].mapping); + dev_kfree_skb_any(priv->cm.srq_ring[id].skb); + priv->cm.srq_ring[id].skb = NULL; + } + + return ret; +} + +static int ipoib_cm_post_receive_nonsrq(struct net_device *dev, + struct ipoib_cm_rx *rx, + struct ib_recv_wr *wr, + struct ib_sge *sge, int id) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int i, ret; + + wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; + + for (i = 0; i < IPOIB_CM_RX_SG; ++i) + sge[i].addr = rx->rx_ring[id].mapping[i]; + + ret = ib_post_recv(rx->qp, wr, NULL); + if (unlikely(ret)) { + ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret); + ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, + rx->rx_ring[id].mapping); + dev_kfree_skb_any(rx->rx_ring[id].skb); + rx->rx_ring[id].skb = NULL; + } + + return ret; +} + +static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, + struct ipoib_cm_rx_buf *rx_ring, + int id, int frags, + u64 mapping[IPOIB_CM_RX_SG], + gfp_t gfp) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct sk_buff *skb; + int i; + + skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16)); + if (unlikely(!skb)) + return NULL; + + /* + * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the + * IP header to a multiple of 16. 
+ */ + skb_reserve(skb, IPOIB_CM_RX_RESERVE); + + mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, + DMA_FROM_DEVICE); + if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { + dev_kfree_skb_any(skb); + return NULL; + } + + for (i = 0; i < frags; i++) { + struct page *page = alloc_page(gfp); + + if (!page) + goto partial_error; + skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); + + mapping[i + 1] = ib_dma_map_page(priv->ca, page, + 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) + goto partial_error; + } + + rx_ring[id].skb = skb; + return skb; + +partial_error: + + ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); + + for (; i > 0; --i) + ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + return NULL; +} + +static void ipoib_cm_free_rx_ring(struct net_device *dev, + struct ipoib_cm_rx_buf *rx_ring) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int i; + + for (i = 0; i < ipoib_recvq_size; ++i) + if (rx_ring[i].skb) { + ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, + rx_ring[i].mapping); + dev_kfree_skb_any(rx_ring[i].skb); + } + + vfree(rx_ring); +} + +static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) +{ + struct ipoib_cm_rx *p; + + /* We only reserved 1 extra slot in CQ for drain WRs, so + * make sure we have at most 1 outstanding WR. */ + if (list_empty(&priv->cm.rx_flush_list) || + !list_empty(&priv->cm.rx_drain_list)) + return; + + /* + * QPs on flush list are error state. This way, a "flush + * error" WC will be immediately generated for each WR we post. + */ + p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list); + ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID; + if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL)) + ipoib_warn(priv, "failed to post drain wr\n"); + + list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list); +} + +static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx) +{ + struct ipoib_cm_rx *p = ctx; + struct ipoib_dev_priv *priv = ipoib_priv(p->dev); + unsigned long flags; + + if (event->event != IB_EVENT_QP_LAST_WQE_REACHED) + return; + + spin_lock_irqsave(&priv->lock, flags); + list_move(&p->list, &priv->cm.rx_flush_list); + p->state = IPOIB_CM_RX_FLUSH; + ipoib_cm_start_rx_drain(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev, + struct ipoib_cm_rx *p) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_init_attr attr = { + .event_handler = ipoib_cm_rx_event_handler, + .send_cq = priv->recv_cq, /* For drain WR */ + .recv_cq = priv->recv_cq, + .srq = priv->cm.srq, + .cap.max_send_wr = 1, /* For drain WR */ + .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */ + .sq_sig_type = IB_SIGNAL_ALL_WR, + .qp_type = IB_QPT_RC, + .qp_context = p, + }; + + if (!ipoib_cm_has_srq(dev)) { + attr.cap.max_recv_wr = ipoib_recvq_size; + attr.cap.max_recv_sge = IPOIB_CM_RX_SG; + } + + return ib_create_qp(priv->pd, &attr); +} + +static int ipoib_cm_modify_rx_qp(struct net_device *dev, + struct ib_cm_id *cm_id, struct ib_qp *qp, + unsigned int psn) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_attr qp_attr; + int qp_attr_mask, ret; + + qp_attr.qp_state = IB_QPS_INIT; + ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret); + return ret; + } + ret = ib_modify_qp(qp, 
&qp_attr, qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret); + return ret; + } + qp_attr.qp_state = IB_QPS_RTR; + ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); + return ret; + } + qp_attr.rq_psn = psn; + ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); + return ret; + } + + /* + * Current Mellanox HCA firmware won't generate completions + * with error for drain WRs unless the QP has been moved to + * RTS first. This work-around leaves a window where a QP has + * moved to error asynchronously, but this will eventually get + * fixed in firmware, so let's not error out if modify QP + * fails. + */ + qp_attr.qp_state = IB_QPS_RTS; + ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); + return 0; + } + ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); + return 0; + } + + return 0; +} + +static void ipoib_cm_init_rx_wr(struct net_device *dev, + struct ib_recv_wr *wr, + struct ib_sge *sge) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int i; + + for (i = 0; i < priv->cm.num_frags; ++i) + sge[i].lkey = priv->pd->local_dma_lkey; + + sge[0].length = IPOIB_CM_HEAD_SIZE; + for (i = 1; i < priv->cm.num_frags; ++i) + sge[i].length = PAGE_SIZE; + + wr->next = NULL; + wr->sg_list = sge; + wr->num_sge = priv->cm.num_frags; +} + +static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, + struct ipoib_cm_rx *rx) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct { + struct ib_recv_wr wr; + struct ib_sge sge[IPOIB_CM_RX_SG]; + } *t; + int ret; + int i; + + rx->rx_ring = vzalloc(array_size(ipoib_recvq_size, + sizeof(*rx->rx_ring))); + if (!rx->rx_ring) + return -ENOMEM; + + t = kmalloc(sizeof(*t), GFP_KERNEL); + if (!t) { + ret = -ENOMEM; + goto err_free_1; + } + + ipoib_cm_init_rx_wr(dev, &t->wr, t->sge); + + spin_lock_irq(&priv->lock); + + if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) { + spin_unlock_irq(&priv->lock); + ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0); + ret = -EINVAL; + goto err_free; + } else + ++priv->cm.nonsrq_conn_qp; + + spin_unlock_irq(&priv->lock); + + for (i = 0; i < ipoib_recvq_size; ++i) { + if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1, + rx->rx_ring[i].mapping, + GFP_KERNEL)) { + ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); + ret = -ENOMEM; + goto err_count; + } + ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); + if (ret) { + ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq " + "failed for buf %d\n", i); + ret = -EIO; + goto err_count; + } + } + + rx->recv_count = ipoib_recvq_size; + + kfree(t); + + return 0; + +err_count: + spin_lock_irq(&priv->lock); + --priv->cm.nonsrq_conn_qp; + spin_unlock_irq(&priv->lock); + +err_free: + kfree(t); + +err_free_1: + ipoib_cm_free_rx_ring(dev, rx->rx_ring); + + return ret; +} + +static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, + struct ib_qp *qp, + const struct ib_cm_req_event_param *req, + unsigned int psn) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_data data = {}; + struct ib_cm_rep_param rep = {}; + + data.qpn = cpu_to_be32(priv->qp->qp_num); + data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE); + + rep.private_data = &data; + 
rep.private_data_len = sizeof(data); + rep.flow_control = 0; + rep.rnr_retry_count = req->rnr_retry_count; + rep.srq = ipoib_cm_has_srq(dev); + rep.qp_num = qp->qp_num; + rep.starting_psn = psn; + return ib_send_cm_rep(cm_id, &rep); +} + +static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event) +{ + struct net_device *dev = cm_id->context; + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_rx *p; + unsigned int psn; + int ret; + + ipoib_dbg(priv, "REQ arrived\n"); + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + p->dev = dev; + p->id = cm_id; + cm_id->context = p; + p->state = IPOIB_CM_RX_LIVE; + p->jiffies = jiffies; + INIT_LIST_HEAD(&p->list); + + p->qp = ipoib_cm_create_rx_qp(dev, p); + if (IS_ERR(p->qp)) { + ret = PTR_ERR(p->qp); + goto err_qp; + } + + psn = prandom_u32() & 0xffffff; + ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); + if (ret) + goto err_modify; + + if (!ipoib_cm_has_srq(dev)) { + ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p); + if (ret) + goto err_modify; + } + + spin_lock_irq(&priv->lock); + queue_delayed_work(priv->wq, + &priv->cm.stale_task, IPOIB_CM_RX_DELAY); + /* Add this entry to passive ids list head, but do not re-add it + * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ + p->jiffies = jiffies; + if (p->state == IPOIB_CM_RX_LIVE) + list_move(&p->list, &priv->cm.passive_ids); + spin_unlock_irq(&priv->lock); + + ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); + if (ret) { + ipoib_warn(priv, "failed to send REP: %d\n", ret); + if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) + ipoib_warn(priv, "unable to move qp to error state\n"); + } + return 0; + +err_modify: + ib_destroy_qp(p->qp); +err_qp: + kfree(p); + return ret; +} + +static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event) +{ + struct ipoib_cm_rx *p; + struct ipoib_dev_priv *priv; + + switch (event->event) { + case IB_CM_REQ_RECEIVED: + return ipoib_cm_req_handler(cm_id, event); + case IB_CM_DREQ_RECEIVED: + ib_send_cm_drep(cm_id, NULL, 0); + /* Fall through */ + case IB_CM_REJ_RECEIVED: + p = cm_id->context; + priv = ipoib_priv(p->dev); + if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) + ipoib_warn(priv, "unable to move qp to error state\n"); + /* Fall through */ + default: + return 0; + } +} +/* Adjust length of skb with fragments to match received data */ +static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, + unsigned int length, struct sk_buff *toskb) +{ + int i, num_frags; + unsigned int size; + + /* put header into skb */ + size = min(length, hdr_space); + skb->tail += size; + skb->len += size; + length -= size; + + num_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + if (length == 0) { + /* don't need this page */ + skb_fill_page_desc(toskb, i, skb_frag_page(frag), + 0, PAGE_SIZE); + --skb_shinfo(skb)->nr_frags; + } else { + size = min_t(unsigned int, length, PAGE_SIZE); + + skb_frag_size_set(frag, size); + skb->data_len += size; + skb->truesize += size; + skb->len += size; + length -= size; + } + } +} + +void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_rx_buf *rx_ring; + unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV); + struct sk_buff *skb, *newskb; + struct ipoib_cm_rx *p; + unsigned long flags; + u64 mapping[IPOIB_CM_RX_SG]; + int 
frags; + int has_srq; + struct sk_buff *small_skb; + + ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n", + wr_id, wc->status); + + if (unlikely(wr_id >= ipoib_recvq_size)) { + if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) { + spin_lock_irqsave(&priv->lock, flags); + list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); + ipoib_cm_start_rx_drain(priv); + queue_work(priv->wq, &priv->cm.rx_reap_task); + spin_unlock_irqrestore(&priv->lock, flags); + } else + ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", + wr_id, ipoib_recvq_size); + return; + } + + p = wc->qp->qp_context; + + has_srq = ipoib_cm_has_srq(dev); + rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring; + + skb = rx_ring[wr_id].skb; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + ipoib_dbg(priv, + "cm recv error (status=%d, wrid=%d vend_err %#x)\n", + wc->status, wr_id, wc->vendor_err); + ++dev->stats.rx_dropped; + if (has_srq) + goto repost; + else { + if (!--p->recv_count) { + spin_lock_irqsave(&priv->lock, flags); + list_move(&p->list, &priv->cm.rx_reap_list); + spin_unlock_irqrestore(&priv->lock, flags); + queue_work(priv->wq, &priv->cm.rx_reap_task); + } + return; + } + } + + if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) { + if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { + spin_lock_irqsave(&priv->lock, flags); + p->jiffies = jiffies; + /* Move this entry to list head, but do not re-add it + * if it has been moved out of list. */ + if (p->state == IPOIB_CM_RX_LIVE) + list_move(&p->list, &priv->cm.passive_ids); + spin_unlock_irqrestore(&priv->lock, flags); + } + } + + if (wc->byte_len < IPOIB_CM_COPYBREAK) { + int dlen = wc->byte_len; + + small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE); + if (small_skb) { + skb_reserve(small_skb, IPOIB_CM_RX_RESERVE); + ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], + dlen, DMA_FROM_DEVICE); + skb_copy_from_linear_data(skb, small_skb->data, dlen); + ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0], + dlen, DMA_FROM_DEVICE); + skb_put(small_skb, dlen); + skb = small_skb; + goto copied; + } + } + + frags = PAGE_ALIGN(wc->byte_len - + min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) / + PAGE_SIZE; + + newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, + mapping, GFP_ATOMIC); + if (unlikely(!newskb)) { + /* + * If we can't allocate a new RX buffer, dump + * this packet and reuse the old buffer. 
+ */ + ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); + ++dev->stats.rx_dropped; + goto repost; + } + + ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping); + memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping)); + + ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", + wc->byte_len, wc->slid); + + skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); + +copied: + skb->protocol = ((struct ipoib_header *) skb->data)->proto; + skb_add_pseudo_hdr(skb); + + ++dev->stats.rx_packets; + dev->stats.rx_bytes += skb->len; + + skb->dev = dev; + /* XXX get correct PACKET_ type here */ + skb->pkt_type = PACKET_HOST; + netif_receive_skb(skb); + +repost: + if (has_srq) { + if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id))) + ipoib_warn(priv, "ipoib_cm_post_receive_srq failed " + "for buf %d\n", wr_id); + } else { + if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, + &priv->cm.rx_wr, + priv->cm.rx_sge, + wr_id))) { + --p->recv_count; + ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed " + "for buf %d\n", wr_id); + } + } +} + +static inline int post_send(struct ipoib_dev_priv *priv, + struct ipoib_cm_tx *tx, + unsigned int wr_id, + struct ipoib_tx_buf *tx_req) +{ + ipoib_build_sge(priv, tx_req); + + priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM; + + return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL); +} + +void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_tx_buf *tx_req; + int rc; + unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb); + + if (unlikely(skb->len > tx->mtu)) { + ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", + skb->len, tx->mtu); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN); + return; + } + if (skb_shinfo(skb)->nr_frags > usable_sge) { + if (skb_linearize(skb) < 0) { + ipoib_warn(priv, "skb could not be linearized\n"); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return; + } + /* Does skb_linearize return ok without reducing nr_frags? */ + if (skb_shinfo(skb)->nr_frags > usable_sge) { + ipoib_warn(priv, "too many frags after skb linearize\n"); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return; + } + } + ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n", + tx->tx_head, skb->len, tx->qp->qp_num); + + /* + * We put the skb into the tx_ring _before_ we call post_send() + * because it's entirely possible that the completion handler will + * run before we execute anything after the post_send(). That + * means we have to make sure everything is properly recorded and + * our state is consistent before we call post_send(). 
+ */ + tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; + tx_req->skb = skb; + + if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return; + } + + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { + ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", + tx->qp->qp_num); + netif_stop_queue(dev); + } + + skb_orphan(skb); + skb_dst_drop(skb); + + if (netif_queue_stopped(dev)) { + rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS); + if (unlikely(rc < 0)) + ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n"); + else if (rc) + napi_schedule(&priv->send_napi); + } + + rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req); + if (unlikely(rc)) { + ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc); + ++dev->stats.tx_errors; + ipoib_dma_unmap_tx(priv, tx_req); + dev_kfree_skb_any(skb); + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + } else { + netif_trans_update(dev); + ++tx->tx_head; + ++priv->global_tx_head; + } +} + +void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_tx *tx = wc->qp->qp_context; + unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; + struct ipoib_tx_buf *tx_req; + unsigned long flags; + + ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", + wr_id, wc->status); + + if (unlikely(wr_id >= ipoib_sendq_size)) { + ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", + wr_id, ipoib_sendq_size); + return; + } + + tx_req = &tx->tx_ring[wr_id]; + + ipoib_dma_unmap_tx(priv, tx_req); + + /* FIXME: is this right? Shouldn't we only increment on success? */ + ++dev->stats.tx_packets; + dev->stats.tx_bytes += tx_req->skb->len; + + dev_kfree_skb_any(tx_req->skb); + + netif_tx_lock(dev); + + ++tx->tx_tail; + ++priv->global_tx_tail; + + if (unlikely(netif_queue_stopped(dev) && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) + netif_wake_queue(dev); + + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) { + struct ipoib_neigh *neigh; + + /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle, + * so don't make waves. 
+ */ + if (wc->status == IB_WC_RNR_RETRY_EXC_ERR || + wc->status == IB_WC_RETRY_EXC_ERR) + ipoib_dbg(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n", + __func__, wc->status, wr_id, wc->vendor_err); + else + ipoib_warn(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n", + __func__, wc->status, wr_id, wc->vendor_err); + + spin_lock_irqsave(&priv->lock, flags); + neigh = tx->neigh; + + if (neigh) { + neigh->cm = NULL; + ipoib_neigh_free(neigh); + + tx->neigh = NULL; + } + + if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { + list_move(&tx->list, &priv->cm.reap_list); + queue_work(priv->wq, &priv->cm.reap_task); + } + + clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); + + spin_unlock_irqrestore(&priv->lock, flags); + } + + netif_tx_unlock(dev); +} + +int ipoib_cm_dev_open(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + + if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) + return 0; + + priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); + if (IS_ERR(priv->cm.id)) { + pr_warn("%s: failed to create CM ID\n", priv->ca->name); + ret = PTR_ERR(priv->cm.id); + goto err_cm; + } + + ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), + 0); + if (ret) { + pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name, + IPOIB_CM_IETF_ID | priv->qp->qp_num); + goto err_listen; + } + + return 0; + +err_listen: + ib_destroy_cm_id(priv->cm.id); +err_cm: + priv->cm.id = NULL; + return ret; +} + +static void ipoib_cm_free_rx_reap_list(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_rx *rx, *n; + LIST_HEAD(list); + + spin_lock_irq(&priv->lock); + list_splice_init(&priv->cm.rx_reap_list, &list); + spin_unlock_irq(&priv->lock); + + list_for_each_entry_safe(rx, n, &list, list) { + ib_destroy_cm_id(rx->id); + ib_destroy_qp(rx->qp); + if (!ipoib_cm_has_srq(dev)) { + ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring); + spin_lock_irq(&priv->lock); + --priv->cm.nonsrq_conn_qp; + spin_unlock_irq(&priv->lock); + } + kfree(rx); + } +} + +void ipoib_cm_dev_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_rx *p; + unsigned long begin; + int ret; + + if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) + return; + + ib_destroy_cm_id(priv->cm.id); + priv->cm.id = NULL; + + spin_lock_irq(&priv->lock); + while (!list_empty(&priv->cm.passive_ids)) { + p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); + list_move(&p->list, &priv->cm.rx_error_list); + p->state = IPOIB_CM_RX_ERROR; + spin_unlock_irq(&priv->lock); + ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); + if (ret) + ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); + spin_lock_irq(&priv->lock); + } + + /* Wait for all RX to be drained */ + begin = jiffies; + + while (!list_empty(&priv->cm.rx_error_list) || + !list_empty(&priv->cm.rx_flush_list) || + !list_empty(&priv->cm.rx_drain_list)) { + if (time_after(jiffies, begin + 5 * HZ)) { + ipoib_warn(priv, "RX drain timing out\n"); + + /* + * assume the HW is wedged and just free up everything. 
+ */ + list_splice_init(&priv->cm.rx_flush_list, + &priv->cm.rx_reap_list); + list_splice_init(&priv->cm.rx_error_list, + &priv->cm.rx_reap_list); + list_splice_init(&priv->cm.rx_drain_list, + &priv->cm.rx_reap_list); + break; + } + spin_unlock_irq(&priv->lock); + usleep_range(1000, 2000); + ipoib_drain_cq(dev); + spin_lock_irq(&priv->lock); + } + + spin_unlock_irq(&priv->lock); + + ipoib_cm_free_rx_reap_list(dev); + + cancel_delayed_work(&priv->cm.stale_task); +} + +static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event) +{ + struct ipoib_cm_tx *p = cm_id->context; + struct ipoib_dev_priv *priv = ipoib_priv(p->dev); + struct ipoib_cm_data *data = event->private_data; + struct sk_buff_head skqueue; + struct ib_qp_attr qp_attr; + int qp_attr_mask, ret; + struct sk_buff *skb; + + p->mtu = be32_to_cpu(data->mtu); + + if (p->mtu <= IPOIB_ENCAP_LEN) { + ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n", + p->mtu, IPOIB_ENCAP_LEN); + return -EINVAL; + } + + qp_attr.qp_state = IB_QPS_RTR; + ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); + return ret; + } + + qp_attr.rq_psn = 0 /* FIXME */; + ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); + return ret; + } + + qp_attr.qp_state = IB_QPS_RTS; + ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); + return ret; + } + ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); + return ret; + } + + skb_queue_head_init(&skqueue); + + netif_tx_lock_bh(p->dev); + spin_lock_irq(&priv->lock); + set_bit(IPOIB_FLAG_OPER_UP, &p->flags); + if (p->neigh) + while ((skb = __skb_dequeue(&p->neigh->queue))) + __skb_queue_tail(&skqueue, skb); + spin_unlock_irq(&priv->lock); + netif_tx_unlock_bh(p->dev); + + while ((skb = __skb_dequeue(&skqueue))) { + skb->dev = p->dev; + ret = dev_queue_xmit(skb); + if (ret) + ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n", + __func__, ret); + } + + ret = ib_send_cm_rtu(cm_id, NULL, 0); + if (ret) { + ipoib_warn(priv, "failed to send RTU: %d\n", ret); + return ret; + } + return 0; +} + +static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_init_attr attr = { + .send_cq = priv->send_cq, + .recv_cq = priv->recv_cq, + .srq = priv->cm.srq, + .cap.max_send_wr = ipoib_sendq_size, + .cap.max_send_sge = 1, + .sq_sig_type = IB_SIGNAL_ALL_WR, + .qp_type = IB_QPT_RC, + .qp_context = tx, + .create_flags = 0 + }; + struct ib_qp *tx_qp; + + if (dev->features & NETIF_F_SG) + attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge, + MAX_SKB_FRAGS + 1); + + tx_qp = ib_create_qp(priv->pd, &attr); + tx->max_send_sge = attr.cap.max_send_sge; + return tx_qp; +} + +static int ipoib_cm_send_req(struct net_device *dev, + struct ib_cm_id *id, struct ib_qp *qp, + u32 qpn, + struct sa_path_rec *pathrec) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_data data = {}; + struct ib_cm_req_param req = {}; + + data.qpn = cpu_to_be32(priv->qp->qp_num); + data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE); + + req.primary_path = pathrec; + req.alternate_path = NULL; + req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn); + req.qp_num = qp->qp_num; + req.qp_type = 
qp->qp_type; + req.private_data = &data; + req.private_data_len = sizeof(data); + req.flow_control = 0; + + req.starting_psn = 0; /* FIXME */ + + /* + * Pick some arbitrary defaults here; we could make these + * module parameters if anyone cared about setting them. + */ + req.responder_resources = 4; + req.remote_cm_response_timeout = 20; + req.local_cm_response_timeout = 20; + req.retry_count = 0; /* RFC draft warns against retries */ + req.rnr_retry_count = 0; /* RFC draft warns against retries */ + req.max_cm_retries = 15; + req.srq = ipoib_cm_has_srq(dev); + return ib_send_cm_req(id, &req); +} + +static int ipoib_cm_modify_tx_init(struct net_device *dev, + struct ib_cm_id *cm_id, struct ib_qp *qp) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_attr qp_attr; + int qp_attr_mask, ret; + ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index); + if (ret) { + ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret); + return ret; + } + + qp_attr.qp_state = IB_QPS_INIT; + qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; + qp_attr.port_num = priv->port; + qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; + + ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); + return ret; + } + return 0; +} + +static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, + struct sa_path_rec *pathrec) +{ + struct ipoib_dev_priv *priv = ipoib_priv(p->dev); + unsigned int noio_flag; + int ret; + + noio_flag = memalloc_noio_save(); + p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring))); + if (!p->tx_ring) { + memalloc_noio_restore(noio_flag); + ret = -ENOMEM; + goto err_tx; + } + memset(p->tx_ring, 0, ipoib_sendq_size * sizeof(*p->tx_ring)); + + p->qp = ipoib_cm_create_tx_qp(p->dev, p); + memalloc_noio_restore(noio_flag); + if (IS_ERR(p->qp)) { + ret = PTR_ERR(p->qp); + ipoib_warn(priv, "failed to create tx qp: %d\n", ret); + goto err_qp; + } + + p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p); + if (IS_ERR(p->id)) { + ret = PTR_ERR(p->id); + ipoib_warn(priv, "failed to create tx cm id: %d\n", ret); + goto err_id; + } + + ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp); + if (ret) { + ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); + goto err_modify_send; + } + + ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); + if (ret) { + ipoib_warn(priv, "failed to send cm req: %d\n", ret); + goto err_modify_send; + } + + ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n", + p->qp->qp_num, pathrec->dgid.raw, qpn); + + return 0; + +err_modify_send: + ib_destroy_cm_id(p->id); +err_id: + p->id = NULL; + ib_destroy_qp(p->qp); +err_qp: + p->qp = NULL; + vfree(p->tx_ring); +err_tx: + return ret; +} + +static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) +{ + struct ipoib_dev_priv *priv = ipoib_priv(p->dev); + struct ipoib_tx_buf *tx_req; + unsigned long begin; + + ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", + p->qp ? 
p->qp->qp_num : 0, p->tx_head, p->tx_tail); + + if (p->id) + ib_destroy_cm_id(p->id); + + if (p->tx_ring) { + /* Wait for all sends to complete */ + begin = jiffies; + while ((int) p->tx_tail - (int) p->tx_head < 0) { + if (time_after(jiffies, begin + 5 * HZ)) { + ipoib_warn(priv, "timing out; %d sends not completed\n", + p->tx_head - p->tx_tail); + goto timeout; + } + + usleep_range(1000, 2000); + } + } + +timeout: + + while ((int) p->tx_tail - (int) p->tx_head < 0) { + tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; + ipoib_dma_unmap_tx(priv, tx_req); + dev_kfree_skb_any(tx_req->skb); + netif_tx_lock_bh(p->dev); + ++p->tx_tail; + ++priv->global_tx_tail; + if (unlikely((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && + netif_queue_stopped(p->dev) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + netif_wake_queue(p->dev); + netif_tx_unlock_bh(p->dev); + } + + if (p->qp) + ib_destroy_qp(p->qp); + + vfree(p->tx_ring); + kfree(p); +} + +static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event) +{ + struct ipoib_cm_tx *tx = cm_id->context; + struct ipoib_dev_priv *priv = ipoib_priv(tx->dev); + struct net_device *dev = priv->dev; + struct ipoib_neigh *neigh; + unsigned long flags; + int ret; + + switch (event->event) { + case IB_CM_DREQ_RECEIVED: + ipoib_dbg(priv, "DREQ received.\n"); + ib_send_cm_drep(cm_id, NULL, 0); + break; + case IB_CM_REP_RECEIVED: + ipoib_dbg(priv, "REP received.\n"); + ret = ipoib_cm_rep_handler(cm_id, event); + if (ret) + ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, + NULL, 0, NULL, 0); + break; + case IB_CM_REQ_ERROR: + case IB_CM_REJ_RECEIVED: + case IB_CM_TIMEWAIT_EXIT: + ipoib_dbg(priv, "CM error %d.\n", event->event); + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + neigh = tx->neigh; + + if (neigh) { + neigh->cm = NULL; + ipoib_neigh_free(neigh); + + tx->neigh = NULL; + } + + if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { + list_move(&tx->list, &priv->cm.reap_list); + queue_work(priv->wq, &priv->cm.reap_task); + } + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); + break; + default: + break; + } + + return 0; +} + +struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path, + struct ipoib_neigh *neigh) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_cm_tx *tx; + + tx = kzalloc(sizeof(*tx), GFP_ATOMIC); + if (!tx) + return NULL; + + neigh->cm = tx; + tx->neigh = neigh; + tx->dev = dev; + list_add(&tx->list, &priv->cm.start_list); + set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); + queue_work(priv->wq, &priv->cm.start_task); + return tx; +} + +void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) +{ + struct ipoib_dev_priv *priv = ipoib_priv(tx->dev); + unsigned long flags; + if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { + spin_lock_irqsave(&priv->lock, flags); + list_move(&tx->list, &priv->cm.reap_list); + queue_work(priv->wq, &priv->cm.reap_task); + ipoib_dbg(priv, "Reap connection for gid %pI6\n", + tx->neigh->daddr + 4); + tx->neigh = NULL; + spin_unlock_irqrestore(&priv->lock, flags); + } +} + +#define QPN_AND_OPTIONS_OFFSET 4 + +static void ipoib_cm_tx_start(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, + cm.start_task); + struct net_device *dev = priv->dev; + struct ipoib_neigh *neigh; + struct ipoib_cm_tx *p; + unsigned long flags; + struct ipoib_path *path; + int ret; + + struct sa_path_rec pathrec; + u32 qpn; + 
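+	/*
+	 * Walk start_list under both netif_tx_lock and priv->lock; the
+	 * locks are dropped around ipoib_cm_tx_init(), which may sleep,
+	 * and re-taken before the next iteration.
+	 */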
+ netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + + while (!list_empty(&priv->cm.start_list)) { + p = list_entry(priv->cm.start_list.next, typeof(*p), list); + list_del_init(&p->list); + neigh = p->neigh; + + qpn = IPOIB_QPN(neigh->daddr); + /* + * As long as the search is with these 2 locks, + * path existence indicates its validity. + */ + path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET); + if (!path) { + pr_info("%s ignore not valid path %pI6\n", + __func__, + neigh->daddr + QPN_AND_OPTIONS_OFFSET); + goto free_neigh; + } + memcpy(&pathrec, &path->pathrec, sizeof(pathrec)); + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); + + ret = ipoib_cm_tx_init(p, qpn, &pathrec); + + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + + if (ret) { +free_neigh: + neigh = p->neigh; + if (neigh) { + neigh->cm = NULL; + ipoib_neigh_free(neigh); + } + list_del(&p->list); + kfree(p); + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); +} + +static void ipoib_cm_tx_reap(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, + cm.reap_task); + struct net_device *dev = priv->dev; + struct ipoib_cm_tx *p; + unsigned long flags; + + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + + while (!list_empty(&priv->cm.reap_list)) { + p = list_entry(priv->cm.reap_list.next, typeof(*p), list); + list_del_init(&p->list); + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); + ipoib_cm_tx_destroy(p); + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + } + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); +} + +static void ipoib_cm_skb_reap(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, + cm.skb_task); + struct net_device *dev = priv->dev; + struct sk_buff *skb; + unsigned long flags; + unsigned int mtu = priv->mcast_mtu; + + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + + while ((skb = skb_dequeue(&priv->cm.skb_queue))) { + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); + + if (skb->protocol == htons(ETH_P_IP)) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) { + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + } +#endif + dev_kfree_skb_any(skb); + + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + } + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); +} + +void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, + unsigned int mtu) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int e = skb_queue_empty(&priv->cm.skb_queue); + + skb_dst_update_pmtu(skb, mtu); + + skb_queue_tail(&priv->cm.skb_queue, skb); + if (e) + queue_work(priv->wq, &priv->cm.skb_task); +} + +static void ipoib_cm_rx_reap(struct work_struct *work) +{ + ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv, + cm.rx_reap_task)->dev); +} + +static void ipoib_cm_stale_task(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, + cm.stale_task.work); + struct ipoib_cm_rx *p; + int ret; + + spin_lock_irq(&priv->lock); + while (!list_empty(&priv->cm.passive_ids)) { + /* List is sorted by LRU, start from tail, + * stop when we see a 
recently used entry */ + p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list); + if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) + break; + list_move(&p->list, &priv->cm.rx_error_list); + p->state = IPOIB_CM_RX_ERROR; + spin_unlock_irq(&priv->lock); + ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); + if (ret) + ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); + spin_lock_irq(&priv->lock); + } + + if (!list_empty(&priv->cm.passive_ids)) + queue_delayed_work(priv->wq, + &priv->cm.stale_task, IPOIB_CM_RX_DELAY); + spin_unlock_irq(&priv->lock); +} + +static ssize_t show_mode(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct net_device *dev = to_net_dev(d); + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) + return sprintf(buf, "connected\n"); + else + return sprintf(buf, "datagram\n"); +} + +static ssize_t set_mode(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *dev = to_net_dev(d); + int ret; + + if (!rtnl_trylock()) { + return restart_syscall(); + } + + if (dev->reg_state != NETREG_REGISTERED) { + rtnl_unlock(); + return -EPERM; + } + + ret = ipoib_set_mode(dev, buf); + + /* The assumption is that the function ipoib_set_mode returned + * with the rtnl held by it, if not the value -EBUSY returned, + * then no need to rtnl_unlock + */ + if (ret != -EBUSY) + rtnl_unlock(); + + return (!ret || ret == -EBUSY) ? count : ret; +} + +static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); + +int ipoib_cm_add_mode_attr(struct net_device *dev) +{ + return device_create_file(&dev->dev, &dev_attr_mode); +} + +static void ipoib_cm_create_srq(struct net_device *dev, int max_sge) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_srq_init_attr srq_init_attr = { + .srq_type = IB_SRQT_BASIC, + .attr = { + .max_wr = ipoib_recvq_size, + .max_sge = max_sge + } + }; + + priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); + if (IS_ERR(priv->cm.srq)) { + if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP) + pr_warn("%s: failed to allocate SRQ, error %ld\n", + priv->ca->name, PTR_ERR(priv->cm.srq)); + priv->cm.srq = NULL; + return; + } + + priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size, + sizeof(*priv->cm.srq_ring))); + if (!priv->cm.srq_ring) { + ib_destroy_srq(priv->cm.srq); + priv->cm.srq = NULL; + return; + } + +} + +int ipoib_cm_dev_init(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int max_srq_sge, i; + + INIT_LIST_HEAD(&priv->cm.passive_ids); + INIT_LIST_HEAD(&priv->cm.reap_list); + INIT_LIST_HEAD(&priv->cm.start_list); + INIT_LIST_HEAD(&priv->cm.rx_error_list); + INIT_LIST_HEAD(&priv->cm.rx_flush_list); + INIT_LIST_HEAD(&priv->cm.rx_drain_list); + INIT_LIST_HEAD(&priv->cm.rx_reap_list); + INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start); + INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap); + INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap); + INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap); + INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task); + + skb_queue_head_init(&priv->cm.skb_queue); + + ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge); + + max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge); + ipoib_cm_create_srq(dev, max_srq_sge); + if (ipoib_cm_has_srq(dev)) { + priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10; + priv->cm.num_frags = max_srq_sge; + ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n", + 
priv->cm.max_cm_mtu, priv->cm.num_frags); + } else { + priv->cm.max_cm_mtu = IPOIB_CM_MTU; + priv->cm.num_frags = IPOIB_CM_RX_SG; + } + + ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge); + + if (ipoib_cm_has_srq(dev)) { + for (i = 0; i < ipoib_recvq_size; ++i) { + if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, + priv->cm.num_frags - 1, + priv->cm.srq_ring[i].mapping, + GFP_KERNEL)) { + ipoib_warn(priv, "failed to allocate " + "receive buffer %d\n", i); + ipoib_cm_dev_cleanup(dev); + return -ENOMEM; + } + + if (ipoib_cm_post_receive_srq(dev, i)) { + ipoib_warn(priv, "ipoib_cm_post_receive_srq " + "failed for buf %d\n", i); + ipoib_cm_dev_cleanup(dev); + return -EIO; + } + } + } + + priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; + return 0; +} + +void ipoib_cm_dev_cleanup(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + + if (!priv->cm.srq) + return; + + ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); + + ret = ib_destroy_srq(priv->cm.srq); + if (ret) + ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); + + priv->cm.srq = NULL; + if (!priv->cm.srq_ring) + return; + + ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring); + priv->cm.srq_ring = NULL; +} diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c new file mode 100644 index 000000000..83429925d --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +#include "ipoib.h" + +struct ipoib_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define IPOIB_NETDEV_STAT(m) { \ + .stat_string = #m, \ + .stat_offset = offsetof(struct rtnl_link_stats64, m) } + +static const struct ipoib_stats ipoib_gstrings_stats[] = { + IPOIB_NETDEV_STAT(rx_packets), + IPOIB_NETDEV_STAT(tx_packets), + IPOIB_NETDEV_STAT(rx_bytes), + IPOIB_NETDEV_STAT(tx_bytes), + IPOIB_NETDEV_STAT(tx_errors), + IPOIB_NETDEV_STAT(rx_dropped), + IPOIB_NETDEV_STAT(tx_dropped), + IPOIB_NETDEV_STAT(multicast), +}; + +#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) + +static void ipoib_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ipoib_dev_priv *priv = ipoib_priv(netdev); + + ib_get_device_fw_str(priv->ca, drvinfo->fw_version); + + strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), + sizeof(drvinfo->bus_info)); + + strlcpy(drvinfo->version, ipoib_driver_version, + sizeof(drvinfo->version)); + + strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver)); +} + +static int ipoib_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs; + coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames; + + return 0; +} + +static int ipoib_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + + /* + * These values are saved in the private data and returned + * when ipoib_get_coalesce() is called + */ + if (coal->rx_coalesce_usecs > 0xffff || + coal->rx_max_coalesced_frames > 0xffff) + return -EINVAL; + + ret = rdma_set_cq_moderation(priv->recv_cq, + coal->rx_max_coalesced_frames, + coal->rx_coalesce_usecs); + if (ret && ret != -EOPNOTSUPP) { + ipoib_warn(priv, "failed modifying CQ (%d)\n", ret); + return ret; + } + + priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs; + priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames; + + return 0; +} +static void ipoib_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + int i; + struct net_device_stats *net_stats = &dev->stats; + u8 *p = (u8 *)net_stats; + + for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) + data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset); + +} +static void ipoib_get_strings(struct net_device __always_unused *dev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) { + memcpy(p, ipoib_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + case ETH_SS_TEST: + default: + break; + } +} +static int ipoib_get_sset_count(struct net_device __always_unused *dev, + int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IPOIB_GLOBAL_STATS_LEN; + case ETH_SS_TEST: + default: + break; + } + return -EOPNOTSUPP; +} + +/* Return lane speed in unit of 1e6 bit/sec */ +static inline int ib_speed_enum_to_int(int speed) +{ + switch (speed) { + case IB_SPEED_SDR: + return SPEED_2500; + case IB_SPEED_DDR: + return SPEED_5000; + case IB_SPEED_QDR: + case IB_SPEED_FDR10: + return SPEED_10000; + case IB_SPEED_FDR: + return SPEED_14000; + case IB_SPEED_EDR: + return SPEED_25000; + } + + return SPEED_UNKNOWN; +} + +static int ipoib_get_link_ksettings(struct 
net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct ipoib_dev_priv *priv = ipoib_priv(netdev); + struct ib_port_attr attr; + int ret, speed, width; + + if (!netif_carrier_ok(netdev)) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + return 0; + } + + ret = ib_query_port(priv->ca, priv->port, &attr); + if (ret < 0) + return -EINVAL; + + speed = ib_speed_enum_to_int(attr.active_speed); + width = ib_width_enum_to_int(attr.active_width); + + if (speed < 0 || width < 0) + return -EINVAL; + + /* Except the following are set, the other members of + * the struct ethtool_link_settings are initialized to + * zero in the function __ethtool_get_link_ksettings. + */ + cmd->base.speed = speed * width; + cmd->base.duplex = DUPLEX_FULL; + + cmd->base.phy_address = 0xFF; + + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.port = PORT_OTHER; + + return 0; +} + +static const struct ethtool_ops ipoib_ethtool_ops = { + .get_link_ksettings = ipoib_get_link_ksettings, + .get_drvinfo = ipoib_get_drvinfo, + .get_coalesce = ipoib_get_coalesce, + .set_coalesce = ipoib_set_coalesce, + .get_strings = ipoib_get_strings, + .get_ethtool_stats = ipoib_get_ethtool_stats, + .get_sset_count = ipoib_get_sset_count, +}; + +void ipoib_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ipoib_ethtool_ops; +} diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c new file mode 100644 index 000000000..178488028 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/err.h> +#include <linux/seq_file.h> +#include <linux/slab.h> + +struct file_operations; + +#include <linux/debugfs.h> +#include <linux/export.h> + +#include "ipoib.h" + +static struct dentry *ipoib_root; + +static void format_gid(union ib_gid *gid, char *buf) +{ + int i, n; + + for (n = 0, i = 0; i < 8; ++i) { + n += sprintf(buf + n, "%x", + be16_to_cpu(((__be16 *) gid->raw)[i])); + if (i < 7) + buf[n++] = ':'; + } +} + +static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos) +{ + struct ipoib_mcast_iter *iter; + loff_t n = *pos; + + iter = ipoib_mcast_iter_init(file->private); + if (!iter) + return NULL; + + while (n--) { + if (ipoib_mcast_iter_next(iter)) { + kfree(iter); + return NULL; + } + } + + return iter; +} + +static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr, + loff_t *pos) +{ + struct ipoib_mcast_iter *iter = iter_ptr; + + (*pos)++; + + if (ipoib_mcast_iter_next(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +static void ipoib_mcg_seq_stop(struct seq_file *file, void *iter_ptr) +{ + /* nothing for now */ +} + +static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr) +{ + struct ipoib_mcast_iter *iter = iter_ptr; + char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"]; + union ib_gid mgid; + unsigned long created; + unsigned int queuelen, complete, send_only; + + if (!iter) + return 0; + + ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen, + &complete, &send_only); + + format_gid(&mgid, gid_buf); + + seq_printf(file, + "GID: %s\n" + " created: %10ld\n" + " queuelen: %9d\n" + " complete: %9s\n" + " send_only: %8s\n" + "\n", + gid_buf, created, queuelen, + complete ? "yes" : "no", + send_only ? "yes" : "no"); + + return 0; +} + +static const struct seq_operations ipoib_mcg_seq_ops = { + .start = ipoib_mcg_seq_start, + .next = ipoib_mcg_seq_next, + .stop = ipoib_mcg_seq_stop, + .show = ipoib_mcg_seq_show, +}; + +static int ipoib_mcg_open(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int ret; + + ret = seq_open(file, &ipoib_mcg_seq_ops); + if (ret) + return ret; + + seq = file->private_data; + seq->private = inode->i_private; + + return 0; +} + +static const struct file_operations ipoib_mcg_fops = { + .owner = THIS_MODULE, + .open = ipoib_mcg_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos) +{ + struct ipoib_path_iter *iter; + loff_t n = *pos; + + iter = ipoib_path_iter_init(file->private); + if (!iter) + return NULL; + + while (n--) { + if (ipoib_path_iter_next(iter)) { + kfree(iter); + return NULL; + } + } + + return iter; +} + +static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr, + loff_t *pos) +{ + struct ipoib_path_iter *iter = iter_ptr; + + (*pos)++; + + if (ipoib_path_iter_next(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr) +{ + /* nothing for now */ +} + +static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr) +{ + struct ipoib_path_iter *iter = iter_ptr; + char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"]; + struct ipoib_path path; + int rate; + + if (!iter) + return 0; + + ipoib_path_iter_read(iter, &path); + + format_gid(&path.pathrec.dgid, gid_buf); + + seq_printf(file, + "GID: %s\n" + " complete: %6s\n", + gid_buf, sa_path_get_dlid(&path.pathrec) ? 
"yes" : "no"); + + if (sa_path_get_dlid(&path.pathrec)) { + rate = ib_rate_to_mbps(path.pathrec.rate); + + seq_printf(file, + " DLID: 0x%04x\n" + " SL: %12d\n" + " rate: %8d.%d Gb/sec\n", + be32_to_cpu(sa_path_get_dlid(&path.pathrec)), + path.pathrec.sl, + rate / 1000, rate % 1000); + } + + seq_putc(file, '\n'); + + return 0; +} + +static const struct seq_operations ipoib_path_seq_ops = { + .start = ipoib_path_seq_start, + .next = ipoib_path_seq_next, + .stop = ipoib_path_seq_stop, + .show = ipoib_path_seq_show, +}; + +static int ipoib_path_open(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int ret; + + ret = seq_open(file, &ipoib_path_seq_ops); + if (ret) + return ret; + + seq = file->private_data; + seq->private = inode->i_private; + + return 0; +} + +static const struct file_operations ipoib_path_fops = { + .owner = THIS_MODULE, + .open = ipoib_path_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +void ipoib_create_debug_files(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + char name[IFNAMSIZ + sizeof("_path")]; + + snprintf(name, sizeof(name), "%s_mcg", dev->name); + priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + ipoib_root, dev, &ipoib_mcg_fops); + if (!priv->mcg_dentry) + ipoib_warn(priv, "failed to create mcg debug file\n"); + + snprintf(name, sizeof(name), "%s_path", dev->name); + priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + ipoib_root, dev, &ipoib_path_fops); + if (!priv->path_dentry) + ipoib_warn(priv, "failed to create path debug file\n"); +} + +void ipoib_delete_debug_files(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + debugfs_remove(priv->mcg_dentry); + debugfs_remove(priv->path_dentry); + priv->mcg_dentry = priv->path_dentry = NULL; +} + +int ipoib_register_debugfs(void) +{ + ipoib_root = debugfs_create_dir("ipoib", NULL); + return ipoib_root ? 0 : -ENOMEM; +} + +void ipoib_unregister_debugfs(void) +{ + debugfs_remove(ipoib_root); +} diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c new file mode 100644 index 000000000..82b9c5b6e --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -0,0 +1,1312 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/delay.h> +#include <linux/moduleparam.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> + +#include <linux/ip.h> +#include <linux/tcp.h> +#include <rdma/ib_cache.h> + +#include "ipoib.h" + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA +static int data_debug_level; + +module_param(data_debug_level, int, 0644); +MODULE_PARM_DESC(data_debug_level, + "Enable data path debug tracing if > 0"); +#endif + +struct ipoib_ah *ipoib_create_ah(struct net_device *dev, + struct ib_pd *pd, struct rdma_ah_attr *attr) +{ + struct ipoib_ah *ah; + struct ib_ah *vah; + + ah = kmalloc(sizeof(*ah), GFP_KERNEL); + if (!ah) + return ERR_PTR(-ENOMEM); + + ah->dev = dev; + ah->last_send = 0; + kref_init(&ah->ref); + + vah = rdma_create_ah(pd, attr); + if (IS_ERR(vah)) { + kfree(ah); + ah = (struct ipoib_ah *)vah; + } else { + ah->ah = vah; + ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah); + } + + return ah; +} + +void ipoib_free_ah(struct kref *kref) +{ + struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref); + struct ipoib_dev_priv *priv = ipoib_priv(ah->dev); + + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + list_add_tail(&ah->list, &priv->dead_ahs); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, + u64 mapping[IPOIB_UD_RX_SG]) +{ + ib_dma_unmap_single(priv->ca, mapping[0], + IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), + DMA_FROM_DEVICE); +} + +static int ipoib_ib_post_receive(struct net_device *dev, int id) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + + priv->rx_wr.wr_id = id | IPOIB_OP_RECV; + priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; + priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; + + + ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL); + if (unlikely(ret)) { + ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); + ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); + dev_kfree_skb_any(priv->rx_ring[id].skb); + priv->rx_ring[id].skb = NULL; + } + + return ret; +} + +static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct sk_buff *skb; + int buf_size; + u64 *mapping; + + buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); + + skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN); + if (unlikely(!skb)) + return NULL; + + /* + * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is + * 64 bytes aligned + */ + skb_reserve(skb, sizeof(struct ipoib_pseudo_header)); + + mapping = priv->rx_ring[id].mapping; + mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, + DMA_FROM_DEVICE); + if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) + goto error; + + priv->rx_ring[id].skb = skb; + return skb; +error: + dev_kfree_skb_any(skb); + return NULL; +} + +static int ipoib_ib_post_receives(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int i; + + for (i = 0; i < ipoib_recvq_size; ++i) { + if (!ipoib_alloc_rx_skb(dev, i)) { + ipoib_warn(priv, 
"failed to allocate receive buffer %d\n", i); + return -ENOMEM; + } + if (ipoib_ib_post_receive(dev, i)) { + ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i); + return -EIO; + } + } + + return 0; +} + +static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; + struct sk_buff *skb; + u64 mapping[IPOIB_UD_RX_SG]; + union ib_gid *dgid; + union ib_gid *sgid; + + ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", + wr_id, wc->status); + + if (unlikely(wr_id >= ipoib_recvq_size)) { + ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", + wr_id, ipoib_recvq_size); + return; + } + + skb = priv->rx_ring[wr_id].skb; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_WR_FLUSH_ERR) + ipoib_warn(priv, + "failed recv event (status=%d, wrid=%d vend_err %#x)\n", + wc->status, wr_id, wc->vendor_err); + ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); + dev_kfree_skb_any(skb); + priv->rx_ring[wr_id].skb = NULL; + return; + } + + memcpy(mapping, priv->rx_ring[wr_id].mapping, + IPOIB_UD_RX_SG * sizeof(*mapping)); + + /* + * If we can't allocate a new RX buffer, dump + * this packet and reuse the old buffer. + */ + if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) { + ++dev->stats.rx_dropped; + goto repost; + } + + ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", + wc->byte_len, wc->slid); + + ipoib_ud_dma_unmap_rx(priv, mapping); + + skb_put(skb, wc->byte_len); + + /* First byte of dgid signals multicast when 0xff */ + dgid = &((struct ib_grh *)skb->data)->dgid; + + if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff) + skb->pkt_type = PACKET_HOST; + else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_MULTICAST; + + sgid = &((struct ib_grh *)skb->data)->sgid; + + /* + * Drop packets that this interface sent, ie multicast packets + * that the HCA has replicated. 
+ */ + if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) { + int need_repost = 1; + + if ((wc->wc_flags & IB_WC_GRH) && + sgid->global.interface_id != priv->local_gid.global.interface_id) + need_repost = 0; + + if (need_repost) { + dev_kfree_skb_any(skb); + goto repost; + } + } + + skb_pull(skb, IB_GRH_BYTES); + + skb->protocol = ((struct ipoib_header *) skb->data)->proto; + skb_add_pseudo_hdr(skb); + + ++dev->stats.rx_packets; + dev->stats.rx_bytes += skb->len; + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + skb->dev = dev; + if ((dev->features & NETIF_F_RXCSUM) && + likely(wc->wc_flags & IB_WC_IP_CSUM_OK)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + napi_gro_receive(&priv->recv_napi, skb); + +repost: + if (unlikely(ipoib_ib_post_receive(dev, wr_id))) + ipoib_warn(priv, "ipoib_ib_post_receive failed " + "for buf %d\n", wr_id); +} + +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) +{ + struct sk_buff *skb = tx_req->skb; + u64 *mapping = tx_req->mapping; + int i; + int off; + + if (skb_headlen(skb)) { + mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) + return -EIO; + + off = 1; + } else + off = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + mapping[i + off] = ib_dma_map_page(ca, + skb_frag_page(frag), + frag->page_offset, skb_frag_size(frag), + DMA_TO_DEVICE); + if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) + goto partial_error; + } + return 0; + +partial_error: + for (; i > 0; --i) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + + ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE); + } + + if (off) + ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); + + return -EIO; +} + +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) +{ + struct sk_buff *skb = tx_req->skb; + u64 *mapping = tx_req->mapping; + int i; + int off; + + if (skb_headlen(skb)) { + ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb), + DMA_TO_DEVICE); + off = 1; + } else + off = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + ib_dma_unmap_page(priv->ca, mapping[i + off], + skb_frag_size(frag), DMA_TO_DEVICE); + } +} + +/* + * As the result of a completion error the QP Can be transferred to SQE states. + * The function checks if the (send)QP is in SQE state and + * moves it back to RTS state, that in order to have it functional again. 
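+ * The SQE state is recoverable: the consumer may move the QP straight
+ * back to RTS without a reset, which is what this work function does.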
+ */ +static void ipoib_qp_state_validate_work(struct work_struct *work) +{ + struct ipoib_qp_state_validate *qp_work = + container_of(work, struct ipoib_qp_state_validate, work); + + struct ipoib_dev_priv *priv = qp_work->priv; + struct ib_qp_attr qp_attr; + struct ib_qp_init_attr query_init_attr; + int ret; + + ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr); + if (ret) { + ipoib_warn(priv, "%s: Failed to query QP ret: %d\n", + __func__, ret); + goto free_res; + } + pr_info("%s: QP: 0x%x is in state: %d\n", + __func__, priv->qp->qp_num, qp_attr.qp_state); + + /* currently support only in SQE->RTS transition*/ + if (qp_attr.qp_state == IB_QPS_SQE) { + qp_attr.qp_state = IB_QPS_RTS; + + ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE); + if (ret) { + pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n", + ret, priv->qp->qp_num); + goto free_res; + } + pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n", + __func__, priv->qp->qp_num); + } else { + pr_warn("QP (%d) will stay in state: %d\n", + priv->qp->qp_num, qp_attr.qp_state); + } + +free_res: + kfree(qp_work); +} + +static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + unsigned int wr_id = wc->wr_id; + struct ipoib_tx_buf *tx_req; + + ipoib_dbg_data(priv, "send completion: id %d, status: %d\n", + wr_id, wc->status); + + if (unlikely(wr_id >= ipoib_sendq_size)) { + ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", + wr_id, ipoib_sendq_size); + return; + } + + tx_req = &priv->tx_ring[wr_id]; + + ipoib_dma_unmap_tx(priv, tx_req); + + ++dev->stats.tx_packets; + dev->stats.tx_bytes += tx_req->skb->len; + + dev_kfree_skb_any(tx_req->skb); + + ++priv->tx_tail; + ++priv->global_tx_tail; + + if (unlikely(netif_queue_stopped(dev) && + ((priv->global_tx_head - priv->global_tx_tail) <= + ipoib_sendq_size >> 1) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))) + netif_wake_queue(dev); + + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) { + struct ipoib_qp_state_validate *qp_work; + ipoib_warn(priv, + "failed send event (status=%d, wrid=%d vend_err %#x)\n", + wc->status, wr_id, wc->vendor_err); + qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC); + if (!qp_work) + return; + + INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work); + qp_work->priv = priv; + queue_work(priv->wq, &qp_work->work); + } +} + +static int poll_tx(struct ipoib_dev_priv *priv) +{ + int n, i; + struct ib_wc *wc; + + n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); + for (i = 0; i < n; ++i) { + wc = priv->send_wc + i; + if (wc->wr_id & IPOIB_OP_CM) + ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i); + else + ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i); + } + return n == MAX_SEND_CQE; +} + +int ipoib_rx_poll(struct napi_struct *napi, int budget) +{ + struct ipoib_dev_priv *priv = + container_of(napi, struct ipoib_dev_priv, recv_napi); + struct net_device *dev = priv->dev; + int done; + int t; + int n, i; + + done = 0; + +poll_more: + while (done < budget) { + int max = (budget - done); + + t = min(IPOIB_NUM_WC, max); + n = ib_poll_cq(priv->recv_cq, t, priv->ibwc); + + for (i = 0; i < n; i++) { + struct ib_wc *wc = priv->ibwc + i; + + if (wc->wr_id & IPOIB_OP_RECV) { + ++done; + if (wc->wr_id & IPOIB_OP_CM) + ipoib_cm_handle_rx_wc(dev, wc); + else + ipoib_ib_handle_rx_wc(dev, wc); + } else { + pr_warn("%s: Got unexpected wqe id\n", __func__); + } + } + + if (n != t) + break; + } + + if (done < budget) { + 
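+		/*
+		 * Budget not exhausted: complete NAPI and re-arm the CQ.
+		 * If completions arrived while re-arming, reschedule NAPI
+		 * and keep polling rather than losing the missed events.
+		 */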
napi_complete(napi); + if (unlikely(ib_req_notify_cq(priv->recv_cq, + IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS)) && + napi_reschedule(napi)) + goto poll_more; + } + + return done; +} + +int ipoib_tx_poll(struct napi_struct *napi, int budget) +{ + struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, + send_napi); + struct net_device *dev = priv->dev; + int n, i; + struct ib_wc *wc; + +poll_more: + n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); + + for (i = 0; i < n; i++) { + wc = priv->send_wc + i; + if (wc->wr_id & IPOIB_OP_CM) + ipoib_cm_handle_tx_wc(dev, wc); + else + ipoib_ib_handle_tx_wc(dev, wc); + } + + if (n < budget) { + napi_complete(napi); + if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS)) && + napi_reschedule(napi)) + goto poll_more; + } + return n < 0 ? 0 : n; +} + +void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr) +{ + struct ipoib_dev_priv *priv = ctx_ptr; + + napi_schedule(&priv->recv_napi); +} + +void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr) +{ + struct ipoib_dev_priv *priv = ctx_ptr; + + napi_schedule(&priv->send_napi); +} + +static inline int post_send(struct ipoib_dev_priv *priv, + unsigned int wr_id, + struct ib_ah *address, u32 dqpn, + struct ipoib_tx_buf *tx_req, + void *head, int hlen) +{ + struct sk_buff *skb = tx_req->skb; + + ipoib_build_sge(priv, tx_req); + + priv->tx_wr.wr.wr_id = wr_id; + priv->tx_wr.remote_qpn = dqpn; + priv->tx_wr.ah = address; + + if (head) { + priv->tx_wr.mss = skb_shinfo(skb)->gso_size; + priv->tx_wr.header = head; + priv->tx_wr.hlen = hlen; + priv->tx_wr.wr.opcode = IB_WR_LSO; + } else + priv->tx_wr.wr.opcode = IB_WR_SEND; + + return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL); +} + +int ipoib_send(struct net_device *dev, struct sk_buff *skb, + struct ib_ah *address, u32 dqpn) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_tx_buf *tx_req; + int hlen, rc; + void *phead; + unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb); + + if (skb_is_gso(skb)) { + hlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + phead = skb->data; + if (unlikely(!skb_pull(skb, hlen))) { + ipoib_warn(priv, "linear data too small\n"); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return -1; + } + } else { + if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { + ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", + skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu); + return -1; + } + phead = NULL; + hlen = 0; + } + if (skb_shinfo(skb)->nr_frags > usable_sge) { + if (skb_linearize(skb) < 0) { + ipoib_warn(priv, "skb could not be linearized\n"); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return -1; + } + /* Does skb_linearize return ok without reducing nr_frags? */ + if (skb_shinfo(skb)->nr_frags > usable_sge) { + ipoib_warn(priv, "too many frags after skb linearize\n"); + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return -1; + } + } + + ipoib_dbg_data(priv, + "sending packet, length=%d address=%p dqpn=0x%06x\n", + skb->len, address, dqpn); + + /* + * We put the skb into the tx_ring _before_ we call post_send() + * because it's entirely possible that the completion handler will + * run before we execute anything after the post_send(). 
That + * means we have to make sure everything is properly recorded and + * our state is consistent before we call post_send(). + */ + tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; + tx_req->skb = skb; + if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { + ++dev->stats.tx_errors; + dev_kfree_skb_any(skb); + return -1; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM; + else + priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; + /* increase the tx_head after send success, but use it for queue state */ + if ((priv->global_tx_head - priv->global_tx_tail) == + ipoib_sendq_size - 1) { + ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); + netif_stop_queue(dev); + } + + skb_orphan(skb); + skb_dst_drop(skb); + + if (netif_queue_stopped(dev)) + if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | + IB_CQ_REPORT_MISSED_EVENTS) < 0) + ipoib_warn(priv, "request notify on send CQ failed\n"); + + rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), + address, dqpn, tx_req, phead, hlen); + if (unlikely(rc)) { + ipoib_warn(priv, "post_send failed, error %d\n", rc); + ++dev->stats.tx_errors; + ipoib_dma_unmap_tx(priv, tx_req); + dev_kfree_skb_any(skb); + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + rc = 0; + } else { + netif_trans_update(dev); + + rc = priv->tx_head; + ++priv->tx_head; + ++priv->global_tx_head; + } + return rc; +} + +static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv) +{ + struct ipoib_ah *ah, *tah; + LIST_HEAD(remove_list); + unsigned long flags; + + netif_tx_lock_bh(priv->dev); + spin_lock_irqsave(&priv->lock, flags); + + list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) + if ((int) priv->tx_tail - (int) ah->last_send >= 0) { + list_del(&ah->list); + rdma_destroy_ah(ah->ah); + kfree(ah); + } + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(priv->dev); +} + +void ipoib_reap_ah(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, ah_reap_task.work); + + ipoib_reap_dead_ahs(priv); + + if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) + queue_delayed_work(priv->wq, &priv->ah_reap_task, + round_jiffies_relative(HZ)); +} + +static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv) +{ + clear_bit(IPOIB_STOP_REAPER, &priv->flags); + queue_delayed_work(priv->wq, &priv->ah_reap_task, + round_jiffies_relative(HZ)); +} + +static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv) +{ + set_bit(IPOIB_STOP_REAPER, &priv->flags); + cancel_delayed_work(&priv->ah_reap_task); + /* + * After ipoib_stop_ah_reaper() we always go through + * ipoib_reap_dead_ahs() which ensures the work is really stopped and + * does a final flush out of the dead_ah's list + */ +} + +static int recvs_pending(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int pending = 0; + int i; + + for (i = 0; i < ipoib_recvq_size; ++i) + if (priv->rx_ring[i].skb) + ++pending; + + return pending; +} + +static void check_qp_movement_and_print(struct ipoib_dev_priv *priv, + struct ib_qp *qp, + enum ib_qp_state new_state) +{ + struct ib_qp_attr qp_attr; + struct ib_qp_init_attr query_init_attr; + int ret; + + ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr); + if (ret) { + ipoib_warn(priv, "%s: Failed to query QP\n", __func__); + return; + } + /* print according to the new-state and the previous state.*/ + if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) + ipoib_dbg(priv, "Failed modify QP, 
IB_QPS_RESET to IB_QPS_ERR, acceptable\n"); + else + ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n", + new_state, qp_attr.qp_state); +} + +static void ipoib_napi_enable(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + napi_enable(&priv->recv_napi); + napi_enable(&priv->send_napi); +} + +static void ipoib_napi_disable(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + napi_disable(&priv->recv_napi); + napi_disable(&priv->send_napi); +} + +int ipoib_ib_dev_stop_default(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_attr qp_attr; + unsigned long begin; + struct ipoib_tx_buf *tx_req; + int i; + + if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) + ipoib_napi_disable(dev); + + ipoib_cm_dev_stop(dev); + + /* + * Move our QP to the error state and then reinitialize in + * when all work requests have completed or have been flushed. + */ + qp_attr.qp_state = IB_QPS_ERR; + if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) + check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR); + + /* Wait for all sends and receives to complete */ + begin = jiffies; + + while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) { + if (time_after(jiffies, begin + 5 * HZ)) { + ipoib_warn(priv, + "timing out; %d sends %d receives not completed\n", + priv->tx_head - priv->tx_tail, + recvs_pending(dev)); + + /* + * assume the HW is wedged and just free up + * all our pending work requests. + */ + while ((int)priv->tx_tail - (int)priv->tx_head < 0) { + tx_req = &priv->tx_ring[priv->tx_tail & + (ipoib_sendq_size - 1)]; + ipoib_dma_unmap_tx(priv, tx_req); + dev_kfree_skb_any(tx_req->skb); + ++priv->tx_tail; + ++priv->global_tx_tail; + } + + for (i = 0; i < ipoib_recvq_size; ++i) { + struct ipoib_rx_buf *rx_req; + + rx_req = &priv->rx_ring[i]; + if (!rx_req->skb) + continue; + ipoib_ud_dma_unmap_rx(priv, + priv->rx_ring[i].mapping); + dev_kfree_skb_any(rx_req->skb); + rx_req->skb = NULL; + } + + goto timeout; + } + + ipoib_drain_cq(dev); + + usleep_range(1000, 2000); + } + + ipoib_dbg(priv, "All sends and receives done.\n"); + +timeout: + qp_attr.qp_state = IB_QPS_RESET; + if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) + ipoib_warn(priv, "Failed to modify QP to RESET state\n"); + + ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP); + + return 0; +} + +int ipoib_ib_dev_open_default(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + + ret = ipoib_init_qp(dev); + if (ret) { + ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret); + return -1; + } + + ret = ipoib_ib_post_receives(dev); + if (ret) { + ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret); + goto out; + } + + ret = ipoib_cm_dev_open(dev); + if (ret) { + ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret); + goto out; + } + + if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) + ipoib_napi_enable(dev); + + return 0; +out: + return -1; +} + +int ipoib_ib_dev_open(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_pkey_dev_check_presence(dev); + + if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { + ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey, + (!(priv->pkey & 0x7fff) ? 
"Invalid" : "not found")); + return -1; + } + + ipoib_start_ah_reaper(priv); + if (priv->rn_ops->ndo_open(dev)) { + pr_warn("%s: Failed to open dev\n", dev->name); + goto dev_stop; + } + + set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + + return 0; + +dev_stop: + ipoib_stop_ah_reaper(priv); + return -1; +} + +void ipoib_ib_dev_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + priv->rn_ops->ndo_stop(dev); + + clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + ipoib_stop_ah_reaper(priv); +} + +void ipoib_pkey_dev_check_presence(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + + if (!(priv->pkey & 0x7fff) || + ib_find_pkey(priv->ca, priv->port, priv->pkey, + &priv->pkey_index)) { + clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); + } else { + if (rn->set_id) + rn->set_id(dev, priv->pkey_index); + set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); + } +} + +void ipoib_ib_dev_up(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_pkey_dev_check_presence(dev); + + if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { + ipoib_dbg(priv, "PKEY is not assigned.\n"); + return; + } + + set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + + ipoib_mcast_start_thread(dev); +} + +void ipoib_ib_dev_down(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg(priv, "downing ib_dev\n"); + + clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + netif_carrier_off(dev); + + ipoib_mcast_stop_thread(dev); + ipoib_mcast_dev_flush(dev); + + ipoib_flush_paths(dev); +} + +void ipoib_drain_cq(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int i, n; + + /* + * We call completion handling routines that expect to be + * called from the BH-disabled NAPI poll context, so disable + * BHs here too. + */ + local_bh_disable(); + + do { + n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); + for (i = 0; i < n; ++i) { + /* + * Convert any successful completions to flush + * errors to avoid passing packets up the + * stack after bringing the device down. + */ + if (priv->ibwc[i].status == IB_WC_SUCCESS) + priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR; + + if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) { + if (priv->ibwc[i].wr_id & IPOIB_OP_CM) + ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); + else + ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); + } else { + pr_warn("%s: Got unexpected wqe id\n", __func__); + } + } + } while (n == IPOIB_NUM_WC); + + while (poll_tx(priv)) + ; /* nothing */ + + local_bh_enable(); +} + +/* + * Takes whatever value which is in pkey index 0 and updates priv->pkey + * returns 0 if the pkey value was changed. + */ +static inline int update_parent_pkey(struct ipoib_dev_priv *priv) +{ + int result; + u16 prev_pkey; + + prev_pkey = priv->pkey; + result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey); + if (result) { + ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n", + priv->port, result); + return result; + } + + priv->pkey |= 0x8000; + + if (prev_pkey != priv->pkey) { + ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n", + prev_pkey, priv->pkey); + /* + * Update the pkey in the broadcast address, while making sure to set + * the full membership bit, so that we join the right broadcast group. + */ + priv->dev->broadcast[8] = priv->pkey >> 8; + priv->dev->broadcast[9] = priv->pkey & 0xff; + return 0; + } + + return 1; +} +/* + * returns 0 if pkey value was found in a different slot. 
+ */ +static inline int update_child_pkey(struct ipoib_dev_priv *priv) +{ + u16 old_index = priv->pkey_index; + + priv->pkey_index = 0; + ipoib_pkey_dev_check_presence(priv->dev); + + if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && + (old_index == priv->pkey_index)) + return 1; + return 0; +} + +/* + * returns true if the device address of the ipoib interface has changed and the + * new address is a valid one (i.e in the gid table), return false otherwise. + */ +static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) +{ + union ib_gid search_gid; + union ib_gid gid0; + union ib_gid *netdev_gid; + int err; + u16 index; + u8 port; + bool ret = false; + + netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4); + if (rdma_query_gid(priv->ca, priv->port, 0, &gid0)) + return false; + + netif_addr_lock_bh(priv->dev); + + /* The subnet prefix may have changed, update it now so we won't have + * to do it later + */ + priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix; + netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix; + search_gid.global.subnet_prefix = gid0.global.subnet_prefix; + + search_gid.global.interface_id = priv->local_gid.global.interface_id; + + netif_addr_unlock_bh(priv->dev); + + err = ib_find_gid(priv->ca, &search_gid, &port, &index); + + netif_addr_lock_bh(priv->dev); + + if (search_gid.global.interface_id != + priv->local_gid.global.interface_id) + /* There was a change while we were looking up the gid, bail + * here and let the next work sort this out + */ + goto out; + + /* The next section of code needs some background: + * Per IB spec the port GUID can't change if the HCA is powered on. + * port GUID is the basis for GID at index 0 which is the basis for + * the default device address of a ipoib interface. + * + * so it seems the flow should be: + * if user_changed_dev_addr && gid in gid tbl + * set bit dev_addr_set + * return true + * else + * return false + * + * The issue is that there are devices that don't follow the spec, + * they change the port GUID when the HCA is powered, so in order + * not to break userspace applications, We need to check if the + * user wanted to control the device address and we assume that + * if he sets the device address back to be based on GID index 0, + * he no longer wishs to control it. + * + * If the user doesn't control the the device address, + * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means + * the port GUID has changed and GID at index 0 has changed + * so we need to change priv->local_gid and priv->dev->dev_addr + * to reflect the new GID. 
+ */ + if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { + if (!err && port == priv->port) { + set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); + if (index == 0) + clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL, + &priv->flags); + else + set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); + ret = true; + } else { + ret = false; + } + } else { + if (!err && port == priv->port) { + ret = true; + } else { + if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) { + memcpy(&priv->local_gid, &gid0, + sizeof(priv->local_gid)); + memcpy(priv->dev->dev_addr + 4, &gid0, + sizeof(priv->local_gid)); + ret = true; + } + } + } + +out: + netif_addr_unlock_bh(priv->dev); + + return ret; +} + +static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, + enum ipoib_flush_level level, + int nesting) +{ + struct ipoib_dev_priv *cpriv; + struct net_device *dev = priv->dev; + int result; + + down_read_nested(&priv->vlan_rwsem, nesting); + + /* + * Flush any child interfaces too -- they might be up even if + * the parent is down. + */ + list_for_each_entry(cpriv, &priv->child_intfs, list) + __ipoib_ib_dev_flush(cpriv, level, nesting + 1); + + up_read(&priv->vlan_rwsem); + + if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && + level != IPOIB_FLUSH_HEAVY) { + /* Make sure the dev_addr is set even if not flushing */ + if (level == IPOIB_FLUSH_LIGHT) + ipoib_dev_addr_changed_valid(priv); + ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); + return; + } + + if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { + /* interface is down. update pkey and leave. */ + if (level == IPOIB_FLUSH_HEAVY) { + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) + update_parent_pkey(priv); + else + update_child_pkey(priv); + } else if (level == IPOIB_FLUSH_LIGHT) + ipoib_dev_addr_changed_valid(priv); + ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); + return; + } + + if (level == IPOIB_FLUSH_HEAVY) { + /* child devices chase their origin pkey value, while non-child + * (parent) devices should always takes what present in pkey index 0 + */ + if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + result = update_child_pkey(priv); + if (result) { + /* restart QP only if P_Key index is changed */ + ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); + return; + } + + } else { + result = update_parent_pkey(priv); + /* restart QP only if P_Key value changed */ + if (result) { + ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n"); + return; + } + } + } + + if (level == IPOIB_FLUSH_LIGHT) { + int oper_up; + ipoib_mark_paths_invalid(dev); + /* Set IPoIB operation as down to prevent races between: + * the flush flow which leaves MCG and on the fly joins + * which can happen during that time. mcast restart task + * should deal with join requests we missed. 
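+ * OPER_UP is restored right below once ipoib_mcast_dev_flush() returns (if
+ * it was set to begin with), so joins are only held off for the duration of
+ * the flush itself.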
+ */ + oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + ipoib_mcast_dev_flush(dev); + if (oper_up) + set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + ipoib_reap_dead_ahs(priv); + } + + if (level >= IPOIB_FLUSH_NORMAL) + ipoib_ib_dev_down(dev); + + if (level == IPOIB_FLUSH_HEAVY) { + if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) + ipoib_ib_dev_stop(dev); + + if (ipoib_ib_dev_open(dev)) + return; + + if (netif_queue_stopped(dev)) + netif_start_queue(dev); + } + + /* + * The device could have been brought down between the start and when + * we get here, don't bring it back up if it's not configured up + */ + if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { + if (level >= IPOIB_FLUSH_NORMAL) + ipoib_ib_dev_up(dev); + if (ipoib_dev_addr_changed_valid(priv)) + ipoib_mcast_restart_task(&priv->restart_task); + } +} + +void ipoib_ib_dev_flush_light(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, flush_light); + + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); +} + +void ipoib_ib_dev_flush_normal(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, flush_normal); + + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); +} + +void ipoib_ib_dev_flush_heavy(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, flush_heavy); + + rtnl_lock(); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); + rtnl_unlock(); +} + +void ipoib_ib_dev_cleanup(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg(priv, "cleaning up ib_dev\n"); + /* + * We must make sure there are no more (path) completions + * that may wish to touch priv fields that are no longer valid + */ + ipoib_flush_paths(dev); + + ipoib_mcast_stop_thread(dev); + ipoib_mcast_dev_flush(dev); + + /* + * All of our ah references aren't free until after + * ipoib_mcast_dev_flush(), ipoib_flush_paths, and + * the neighbor garbage collection is stopped and reaped. + * That should all be done now, so make a final ah flush. + */ + ipoib_reap_dead_ahs(priv); + + clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); + + priv->rn_ops->ndo_uninit(dev); + + if (priv->pd) { + ib_dealloc_pd(priv->pd); + priv->pd = NULL; + } +} + diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c new file mode 100644 index 000000000..d0c35eb68 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -0,0 +1,2609 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "ipoib.h" + +#include <linux/module.h> + +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/vmalloc.h> + +#include <linux/if_arp.h> /* For ARPHRD_xxx */ + +#include <linux/ip.h> +#include <linux/in.h> + +#include <linux/jhash.h> +#include <net/arp.h> +#include <net/addrconf.h> +#include <linux/inetdevice.h> +#include <rdma/ib_cache.h> + +#define DRV_VERSION "1.0.0" + +const char ipoib_driver_version[] = DRV_VERSION; + +MODULE_AUTHOR("Roland Dreier"); +MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); +MODULE_LICENSE("Dual BSD/GPL"); + +int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; +int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; + +module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); +MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); +module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); +MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +int ipoib_debug_level; + +module_param_named(debug_level, ipoib_debug_level, int, 0644); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); +#endif + +struct ipoib_path_iter { + struct net_device *dev; + struct ipoib_path path; +}; + +static const u8 ipv4_bcast_addr[] = { + 0x00, 0xff, 0xff, 0xff, + 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff +}; + +struct workqueue_struct *ipoib_workqueue; + +struct ib_sa_client ipoib_sa_client; + +static void ipoib_add_one(struct ib_device *device); +static void ipoib_remove_one(struct ib_device *device, void *client_data); +static void ipoib_neigh_reclaim(struct rcu_head *rp); +static struct net_device *ipoib_get_net_dev_by_params( + struct ib_device *dev, u8 port, u16 pkey, + const union ib_gid *gid, const struct sockaddr *addr, + void *client_data); +static int ipoib_set_mac(struct net_device *dev, void *addr); +static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd); + +static struct ib_client ipoib_client = { + .name = "ipoib", + .add = ipoib_add_one, + .remove = ipoib_remove_one, + .get_net_dev_by_params = ipoib_get_net_dev_by_params, +}; + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +static int ipoib_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netdev_notifier_info *ni = ptr; + struct net_device *dev = ni->dev; + + if (dev->netdev_ops->ndo_open != ipoib_open) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_REGISTER: + ipoib_create_debug_files(dev); + break; + case NETDEV_CHANGENAME: + ipoib_delete_debug_files(dev); + ipoib_create_debug_files(dev); + break; + case NETDEV_UNREGISTER: + ipoib_delete_debug_files(dev); + break; + } + + return NOTIFY_DONE; +} +#endif + +int 
ipoib_open(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg(priv, "bringing up interface\n"); + + netif_carrier_off(dev); + + set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + + priv->sm_fullmember_sendonly_support = false; + + if (ipoib_ib_dev_open(dev)) { + if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) + return 0; + goto err_disable; + } + + ipoib_ib_dev_up(dev); + + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + struct ipoib_dev_priv *cpriv; + + /* Bring up any child interfaces too */ + down_read(&priv->vlan_rwsem); + list_for_each_entry(cpriv, &priv->child_intfs, list) { + int flags; + + flags = cpriv->dev->flags; + if (flags & IFF_UP) + continue; + + dev_change_flags(cpriv->dev, flags | IFF_UP); + } + up_read(&priv->vlan_rwsem); + } + + netif_start_queue(dev); + + return 0; + +err_disable: + clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + + return -EINVAL; +} + +static int ipoib_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg(priv, "stopping interface\n"); + + clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + + netif_stop_queue(dev); + + ipoib_ib_dev_down(dev); + ipoib_ib_dev_stop(dev); + + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + struct ipoib_dev_priv *cpriv; + + /* Bring down any child interfaces too */ + down_read(&priv->vlan_rwsem); + list_for_each_entry(cpriv, &priv->child_intfs, list) { + int flags; + + flags = cpriv->dev->flags; + if (!(flags & IFF_UP)) + continue; + + dev_change_flags(cpriv->dev, flags & ~IFF_UP); + } + up_read(&priv->vlan_rwsem); + } + + return 0; +} + +static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); + + return features; +} + +static int ipoib_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret = 0; + + /* dev->mtu > 2K ==> connected mode */ + if (ipoib_cm_admin_enabled(dev)) { + if (new_mtu > ipoib_cm_max_mtu(dev)) + return -EINVAL; + + if (new_mtu > priv->mcast_mtu) + ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", + priv->mcast_mtu); + + dev->mtu = new_mtu; + return 0; + } + + if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) || + new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) + return -EINVAL; + + priv->admin_mtu = new_mtu; + + if (priv->mcast_mtu < priv->admin_mtu) + ipoib_dbg(priv, "MTU must be smaller than the underlying " + "link layer MTU - 4 (%u)\n", priv->mcast_mtu); + + new_mtu = min(priv->mcast_mtu, priv->admin_mtu); + + if (priv->rn_ops->ndo_change_mtu) { + bool carrier_status = netif_carrier_ok(dev); + + netif_carrier_off(dev); + + /* notify lower level on the real mtu */ + ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu); + + if (carrier_status) + netif_carrier_on(dev); + } else { + dev->mtu = new_mtu; + } + + return ret; +} + +static void ipoib_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (priv->rn_ops->ndo_get_stats64) + priv->rn_ops->ndo_get_stats64(dev, stats); + else + netdev_stats_to_stats64(stats, &dev->stats); +} + +/* Called with an RCU read lock taken */ +static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr, + struct net_device *dev) +{ + struct net *net = dev_net(dev); + struct in_device *in_dev; + struct sockaddr_in *addr_in = (struct sockaddr_in *)addr; + 
struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr; + __be32 ret_addr; + + switch (addr->sa_family) { + case AF_INET: + in_dev = in_dev_get(dev); + if (!in_dev) + return false; + + ret_addr = inet_confirm_addr(net, in_dev, 0, + addr_in->sin_addr.s_addr, + RT_SCOPE_HOST); + in_dev_put(in_dev); + if (ret_addr) + return true; + + break; + case AF_INET6: + if (IS_ENABLED(CONFIG_IPV6) && + ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1)) + return true; + + break; + } + return false; +} + +/** + * Find the master net_device on top of the given net_device. + * @dev: base IPoIB net_device + * + * Returns the master net_device with a reference held, or the same net_device + * if no master exists. + */ +static struct net_device *ipoib_get_master_net_dev(struct net_device *dev) +{ + struct net_device *master; + + rcu_read_lock(); + master = netdev_master_upper_dev_get_rcu(dev); + if (master) + dev_hold(master); + rcu_read_unlock(); + + if (master) + return master; + + dev_hold(dev); + return dev; +} + +struct ipoib_walk_data { + const struct sockaddr *addr; + struct net_device *result; +}; + +static int ipoib_upper_walk(struct net_device *upper, void *_data) +{ + struct ipoib_walk_data *data = _data; + int ret = 0; + + if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) { + dev_hold(upper); + data->result = upper; + ret = 1; + } + + return ret; +} + +/** + * Find a net_device matching the given address, which is an upper device of + * the given net_device. + * @addr: IP address to look for. + * @dev: base IPoIB net_device + * + * If found, returns the net_device with a reference held. Otherwise return + * NULL. + */ +static struct net_device *ipoib_get_net_dev_match_addr( + const struct sockaddr *addr, struct net_device *dev) +{ + struct ipoib_walk_data data = { + .addr = addr, + }; + + rcu_read_lock(); + if (ipoib_is_dev_match_addr_rcu(addr, dev)) { + dev_hold(dev); + data.result = dev; + goto out; + } + + netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data); +out: + rcu_read_unlock(); + return data.result; +} + +/* returns the number of IPoIB netdevs on top a given ipoib device matching a + * pkey_index and address, if one exists. + * + * @found_net_dev: contains a matching net_device if the return value >= 1, + * with a reference held. */ +static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv, + const union ib_gid *gid, + u16 pkey_index, + const struct sockaddr *addr, + int nesting, + struct net_device **found_net_dev) +{ + struct ipoib_dev_priv *child_priv; + struct net_device *net_dev = NULL; + int matches = 0; + + if (priv->pkey_index == pkey_index && + (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) { + if (!addr) { + net_dev = ipoib_get_master_net_dev(priv->dev); + } else { + /* Verify the net_device matches the IP address, as + * IPoIB child devices currently share a GID. */ + net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev); + } + if (net_dev) { + if (!*found_net_dev) + *found_net_dev = net_dev; + else + dev_put(net_dev); + ++matches; + } + } + + /* Check child interfaces */ + down_read_nested(&priv->vlan_rwsem, nesting); + list_for_each_entry(child_priv, &priv->child_intfs, list) { + matches += ipoib_match_gid_pkey_addr(child_priv, gid, + pkey_index, addr, + nesting + 1, + found_net_dev); + if (matches > 1) + break; + } + up_read(&priv->vlan_rwsem); + + return matches; +} + +/* Returns the number of matching net_devs found (between 0 and 2). 
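+ * Counting stops as soon as more than one match is found, since the caller
+ * only needs to know whether the match is unique.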
Also + * return the matching net_device in the @net_dev parameter, holding a + * reference to the net_device, if the number of matches >= 1 */ +static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, + u16 pkey_index, + const union ib_gid *gid, + const struct sockaddr *addr, + struct net_device **net_dev) +{ + struct ipoib_dev_priv *priv; + int matches = 0; + + *net_dev = NULL; + + list_for_each_entry(priv, dev_list, list) { + if (priv->port != port) + continue; + + matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index, + addr, 0, net_dev); + if (matches > 1) + break; + } + + return matches; +} + +static struct net_device *ipoib_get_net_dev_by_params( + struct ib_device *dev, u8 port, u16 pkey, + const union ib_gid *gid, const struct sockaddr *addr, + void *client_data) +{ + struct net_device *net_dev; + struct list_head *dev_list = client_data; + u16 pkey_index; + int matches; + int ret; + + if (!rdma_protocol_ib(dev, port)) + return NULL; + + ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); + if (ret) + return NULL; + + if (!dev_list) + return NULL; + + /* See if we can find a unique device matching the L2 parameters */ + matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, + gid, NULL, &net_dev); + + switch (matches) { + case 0: + return NULL; + case 1: + return net_dev; + } + + dev_put(net_dev); + + /* Couldn't find a unique device with L2 parameters only. Use L3 + * address to uniquely match the net device */ + matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, + gid, addr, &net_dev); + switch (matches) { + case 0: + return NULL; + default: + dev_warn_ratelimited(&dev->dev, + "duplicate IP address detected\n"); + /* Fall through */ + case 1: + return net_dev; + } +} + +int ipoib_set_mode(struct net_device *dev, const char *buf) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && + !strcmp(buf, "connected\n")) || + (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && + !strcmp(buf, "datagram\n"))) { + return 0; + } + + /* flush paths if we switch modes so that connections are restarted */ + if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { + set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); + ipoib_warn(priv, "enabling connected mode " + "will cause multicast packet drops\n"); + netdev_update_features(dev); + dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); + rtnl_unlock(); + priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; + + ipoib_flush_paths(dev); + return (!rtnl_trylock()) ? -EBUSY : 0; + } + + if (!strcmp(buf, "datagram\n")) { + clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); + netdev_update_features(dev); + dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); + rtnl_unlock(); + ipoib_flush_paths(dev); + return (!rtnl_trylock()) ? 
-EBUSY : 0; + } + + return -EINVAL; +} + +struct ipoib_path *__path_find(struct net_device *dev, void *gid) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rb_node *n = priv->path_tree.rb_node; + struct ipoib_path *path; + int ret; + + while (n) { + path = rb_entry(n, struct ipoib_path, rb_node); + + ret = memcmp(gid, path->pathrec.dgid.raw, + sizeof (union ib_gid)); + + if (ret < 0) + n = n->rb_left; + else if (ret > 0) + n = n->rb_right; + else + return path; + } + + return NULL; +} + +static int __path_add(struct net_device *dev, struct ipoib_path *path) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rb_node **n = &priv->path_tree.rb_node; + struct rb_node *pn = NULL; + struct ipoib_path *tpath; + int ret; + + while (*n) { + pn = *n; + tpath = rb_entry(pn, struct ipoib_path, rb_node); + + ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, + sizeof (union ib_gid)); + if (ret < 0) + n = &pn->rb_left; + else if (ret > 0) + n = &pn->rb_right; + else + return -EEXIST; + } + + rb_link_node(&path->rb_node, pn, n); + rb_insert_color(&path->rb_node, &priv->path_tree); + + list_add_tail(&path->list, &priv->path_list); + + return 0; +} + +static void path_free(struct net_device *dev, struct ipoib_path *path) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&path->queue))) + dev_kfree_skb_irq(skb); + + ipoib_dbg(ipoib_priv(dev), "path_free\n"); + + /* remove all neigh connected to this path */ + ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); + + if (path->ah) + ipoib_put_ah(path->ah); + + kfree(path); +} + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG + +struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) +{ + struct ipoib_path_iter *iter; + + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return NULL; + + iter->dev = dev; + memset(iter->path.pathrec.dgid.raw, 0, 16); + + if (ipoib_path_iter_next(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +int ipoib_path_iter_next(struct ipoib_path_iter *iter) +{ + struct ipoib_dev_priv *priv = ipoib_priv(iter->dev); + struct rb_node *n; + struct ipoib_path *path; + int ret = 1; + + spin_lock_irq(&priv->lock); + + n = rb_first(&priv->path_tree); + + while (n) { + path = rb_entry(n, struct ipoib_path, rb_node); + + if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, + sizeof (union ib_gid)) < 0) { + iter->path = *path; + ret = 0; + break; + } + + n = rb_next(n); + } + + spin_unlock_irq(&priv->lock); + + return ret; +} + +void ipoib_path_iter_read(struct ipoib_path_iter *iter, + struct ipoib_path *path) +{ + *path = iter->path; +} + +#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ + +void ipoib_mark_paths_invalid(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_path *path, *tp; + + spin_lock_irq(&priv->lock); + + list_for_each_entry_safe(path, tp, &priv->path_list, list) { + ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n", + be32_to_cpu(sa_path_get_dlid(&path->pathrec)), + path->pathrec.dgid.raw); + if (path->ah) + path->ah->valid = 0; + } + + spin_unlock_irq(&priv->lock); +} + +static void push_pseudo_header(struct sk_buff *skb, const char *daddr) +{ + struct ipoib_pseudo_header *phdr; + + phdr = skb_push(skb, sizeof(*phdr)); + memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); +} + +void ipoib_flush_paths(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_path *path, *tp; + LIST_HEAD(remove_list); + unsigned long flags; + + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, 
flags); + + list_splice_init(&priv->path_list, &remove_list); + + list_for_each_entry(path, &remove_list, list) + rb_erase(&path->rb_node, &priv->path_tree); + + list_for_each_entry_safe(path, tp, &remove_list, list) { + if (path->query) + ib_sa_cancel_query(path->query_id, path->query); + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); + wait_for_completion(&path->done); + path_free(dev, path); + netif_tx_lock_bh(dev); + spin_lock_irqsave(&priv->lock, flags); + } + + spin_unlock_irqrestore(&priv->lock, flags); + netif_tx_unlock_bh(dev); +} + +static void path_rec_completion(int status, + struct sa_path_rec *pathrec, + void *path_ptr) +{ + struct ipoib_path *path = path_ptr; + struct net_device *dev = path->dev; + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_ah *ah = NULL; + struct ipoib_ah *old_ah = NULL; + struct ipoib_neigh *neigh, *tn; + struct sk_buff_head skqueue; + struct sk_buff *skb; + unsigned long flags; + + if (!status) + ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n", + be32_to_cpu(sa_path_get_dlid(pathrec)), + pathrec->dgid.raw); + else + ipoib_dbg(priv, "PathRec status %d for GID %pI6\n", + status, path->pathrec.dgid.raw); + + skb_queue_head_init(&skqueue); + + if (!status) { + struct rdma_ah_attr av; + + if (!ib_init_ah_attr_from_path(priv->ca, priv->port, + pathrec, &av, NULL)) { + ah = ipoib_create_ah(dev, priv->pd, &av); + rdma_destroy_ah_attr(&av); + } + } + + spin_lock_irqsave(&priv->lock, flags); + + if (!IS_ERR_OR_NULL(ah)) { + /* + * pathrec.dgid is used as the database key from the LLADDR, + * it must remain unchanged even if the SA returns a different + * GID to use in the AH. + */ + if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid))) { + ipoib_dbg( + priv, + "%s got PathRec for gid %pI6 while asked for %pI6\n", + dev->name, pathrec->dgid.raw, + path->pathrec.dgid.raw); + memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid)); + } + + path->pathrec = *pathrec; + + old_ah = path->ah; + path->ah = ah; + + ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", + ah, be32_to_cpu(sa_path_get_dlid(pathrec)), + pathrec->sl); + + while ((skb = __skb_dequeue(&path->queue))) + __skb_queue_tail(&skqueue, skb); + + list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { + if (neigh->ah) { + WARN_ON(neigh->ah != old_ah); + /* + * Dropping the ah reference inside + * priv->lock is safe here, because we + * will hold one more reference from + * the original value of path->ah (ie + * old_ah). 
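+ * old_ah itself is only dropped further down in this function,
+ * after priv->lock has been released.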
+ */ + ipoib_put_ah(neigh->ah); + } + kref_get(&path->ah->ref); + neigh->ah = path->ah; + + if (ipoib_cm_enabled(dev, neigh->daddr)) { + if (!ipoib_cm_get(neigh)) + ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, + path, + neigh)); + if (!ipoib_cm_get(neigh)) { + ipoib_neigh_free(neigh); + continue; + } + } + + while ((skb = __skb_dequeue(&neigh->queue))) + __skb_queue_tail(&skqueue, skb); + } + path->ah->valid = 1; + } + + path->query = NULL; + complete(&path->done); + + spin_unlock_irqrestore(&priv->lock, flags); + + if (IS_ERR_OR_NULL(ah)) + ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); + + if (old_ah) + ipoib_put_ah(old_ah); + + while ((skb = __skb_dequeue(&skqueue))) { + int ret; + skb->dev = dev; + ret = dev_queue_xmit(skb); + if (ret) + ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n", + __func__, ret); + } +} + +static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path, + void *gid) +{ + path->dev = priv->dev; + + if (rdma_cap_opa_ah(priv->ca, priv->port)) + path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA; + else + path->pathrec.rec_type = SA_PATH_REC_TYPE_IB; + + memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid)); + path->pathrec.sgid = priv->local_gid; + path->pathrec.pkey = cpu_to_be16(priv->pkey); + path->pathrec.numb_path = 1; + path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; +} + +static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_path *path; + + if (!priv->broadcast) + return NULL; + + path = kzalloc(sizeof(*path), GFP_ATOMIC); + if (!path) + return NULL; + + skb_queue_head_init(&path->queue); + + INIT_LIST_HEAD(&path->neigh_list); + + init_path_rec(priv, path, gid); + + return path; +} + +static int path_rec_start(struct net_device *dev, + struct ipoib_path *path) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg(priv, "Start path record lookup for %pI6\n", + path->pathrec.dgid.raw); + + init_completion(&path->done); + + path->query_id = + ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, + &path->pathrec, + IB_SA_PATH_REC_DGID | + IB_SA_PATH_REC_SGID | + IB_SA_PATH_REC_NUMB_PATH | + IB_SA_PATH_REC_TRAFFIC_CLASS | + IB_SA_PATH_REC_PKEY, + 1000, GFP_ATOMIC, + path_rec_completion, + path, &path->query); + if (path->query_id < 0) { + ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); + path->query = NULL; + complete(&path->done); + return path->query_id; + } + + return 0; +} + +static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr, + struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_path *path; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + path = __path_find(dev, daddr + 4); + if (!path) + goto out; + if (!path->query) + path_rec_start(dev, path); +out: + spin_unlock_irqrestore(&priv->lock, flags); +} + +static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr, + struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + struct ipoib_path *path; + struct ipoib_neigh *neigh; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + neigh = ipoib_neigh_alloc(daddr, dev); + if (!neigh) { + spin_unlock_irqrestore(&priv->lock, flags); + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + return NULL; + } + + /* To avoid race condition, make sure that the + * neigh will be added only once. 
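+ * ipoib_neigh_alloc() may hand back an entry that another context already
+ * linked to a path; a non-empty neigh->list is how that case is detected.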
+ */ + if (unlikely(!list_empty(&neigh->list))) { + spin_unlock_irqrestore(&priv->lock, flags); + return neigh; + } + + path = __path_find(dev, daddr + 4); + if (!path) { + path = path_rec_create(dev, daddr + 4); + if (!path) + goto err_path; + + __path_add(dev, path); + } + + list_add_tail(&neigh->list, &path->neigh_list); + + if (path->ah && path->ah->valid) { + kref_get(&path->ah->ref); + neigh->ah = path->ah; + + if (ipoib_cm_enabled(dev, neigh->daddr)) { + if (!ipoib_cm_get(neigh)) + ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); + if (!ipoib_cm_get(neigh)) { + ipoib_neigh_free(neigh); + goto err_drop; + } + if (skb_queue_len(&neigh->queue) < + IPOIB_MAX_PATH_REC_QUEUE) { + push_pseudo_header(skb, neigh->daddr); + __skb_queue_tail(&neigh->queue, skb); + } else { + ipoib_warn(priv, "queue length limit %d. Packet drop.\n", + skb_queue_len(&neigh->queue)); + goto err_drop; + } + } else { + spin_unlock_irqrestore(&priv->lock, flags); + path->ah->last_send = rn->send(dev, skb, path->ah->ah, + IPOIB_QPN(daddr)); + ipoib_neigh_put(neigh); + return NULL; + } + } else { + neigh->ah = NULL; + + if (!path->query && path_rec_start(dev, path)) + goto err_path; + if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + push_pseudo_header(skb, neigh->daddr); + __skb_queue_tail(&neigh->queue, skb); + } else { + goto err_drop; + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + ipoib_neigh_put(neigh); + return NULL; + +err_path: + ipoib_neigh_free(neigh); +err_drop: + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + + spin_unlock_irqrestore(&priv->lock, flags); + ipoib_neigh_put(neigh); + + return NULL; +} + +static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, + struct ipoib_pseudo_header *phdr) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + struct ipoib_path *path; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* no broadcast means that all paths are (going to be) not valid */ + if (!priv->broadcast) + goto drop_and_unlock; + + path = __path_find(dev, phdr->hwaddr + 4); + if (!path || !path->ah || !path->ah->valid) { + if (!path) { + path = path_rec_create(dev, phdr->hwaddr + 4); + if (!path) + goto drop_and_unlock; + __path_add(dev, path); + } else { + /* + * make sure there are no changes in the existing + * path record + */ + init_path_rec(priv, path, phdr->hwaddr + 4); + } + if (!path->query && path_rec_start(dev, path)) { + goto drop_and_unlock; + } + + if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + push_pseudo_header(skb, phdr->hwaddr); + __skb_queue_tail(&path->queue, skb); + goto unlock; + } else { + goto drop_and_unlock; + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + ipoib_dbg(priv, "Send unicast ARP to %08x\n", + be32_to_cpu(sa_path_get_dlid(&path->pathrec))); + path->ah->last_send = rn->send(dev, skb, path->ah->ah, + IPOIB_QPN(phdr->hwaddr)); + return; + +drop_and_unlock: + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); +unlock: + spin_unlock_irqrestore(&priv->lock, flags); +} + +static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + struct ipoib_neigh *neigh; + struct ipoib_pseudo_header *phdr; + struct ipoib_header *header; + unsigned long flags; + + phdr = (struct ipoib_pseudo_header *) skb->data; + skb_pull(skb, sizeof(*phdr)); + header = (struct ipoib_header *) skb->data; + + if (unlikely(phdr->hwaddr[4] 
== 0xff)) { + /* multicast, arrange "if" according to probability */ + if ((header->proto != htons(ETH_P_IP)) && + (header->proto != htons(ETH_P_IPV6)) && + (header->proto != htons(ETH_P_ARP)) && + (header->proto != htons(ETH_P_RARP)) && + (header->proto != htons(ETH_P_TIPC))) { + /* ethertype not supported by IPoIB */ + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* Add in the P_Key for multicast*/ + phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; + phdr->hwaddr[9] = priv->pkey & 0xff; + + neigh = ipoib_neigh_get(dev, phdr->hwaddr); + if (likely(neigh)) + goto send_using_neigh; + ipoib_mcast_send(dev, phdr->hwaddr, skb); + return NETDEV_TX_OK; + } + + /* unicast, arrange "switch" according to probability */ + switch (header->proto) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + case htons(ETH_P_TIPC): + neigh = ipoib_neigh_get(dev, phdr->hwaddr); + if (unlikely(!neigh)) { + neigh = neigh_add_path(skb, phdr->hwaddr, dev); + if (likely(!neigh)) + return NETDEV_TX_OK; + } + break; + case htons(ETH_P_ARP): + case htons(ETH_P_RARP): + /* for unicast ARP and RARP should always perform path find */ + unicast_arp_send(skb, dev, phdr); + return NETDEV_TX_OK; + default: + /* ethertype not supported by IPoIB */ + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + +send_using_neigh: + /* note we now hold a ref to neigh */ + if (ipoib_cm_get(neigh)) { + if (ipoib_cm_up(neigh)) { + ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); + goto unref; + } + } else if (neigh->ah && neigh->ah->valid) { + neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah, + IPOIB_QPN(phdr->hwaddr)); + goto unref; + } else if (neigh->ah) { + neigh_refresh_path(neigh, phdr->hwaddr, dev); + } + + if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + push_pseudo_header(skb, phdr->hwaddr); + spin_lock_irqsave(&priv->lock, flags); + __skb_queue_tail(&neigh->queue, skb); + spin_unlock_irqrestore(&priv->lock, flags); + } else { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + } + +unref: + ipoib_neigh_put(neigh); + + return NETDEV_TX_OK; +} + +static void ipoib_timeout(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_warn(priv, "transmit timeout: latency %d msecs\n", + jiffies_to_msecs(jiffies - dev_trans_start(dev))); + ipoib_warn(priv, + "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n", + netif_queue_stopped(dev), priv->tx_head, priv->tx_tail, + priv->global_tx_head, priv->global_tx_tail); + + /* XXX reset QP, etc. */ +} + +static int ipoib_hard_header(struct sk_buff *skb, + struct net_device *dev, + unsigned short type, + const void *daddr, + const void *saddr, + unsigned int len) +{ + struct ipoib_header *header; + + header = skb_push(skb, sizeof(*header)); + + header->proto = htons(type); + header->reserved = 0; + + /* + * we don't rely on dst_entry structure, always stuff the + * destination address into skb hard header so we can figure out where + * to send the packet later. 
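+ * The pseudo header added here is pulled off again at the top of
+ * ipoib_start_xmit(); only the real IPoIB header underneath it is
+ * actually transmitted.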
+ */ + push_pseudo_header(skb, daddr); + + return IPOIB_HARD_LEN; +} + +static void ipoib_set_mcast_list(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { + ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set"); + return; + } + + queue_work(priv->wq, &priv->restart_task); +} + +static int ipoib_get_iflink(const struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + /* parent interface */ + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) + return dev->ifindex; + + /* child/vlan interface */ + return priv->parent->ifindex; +} + +static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) +{ + /* + * Use only the address parts that contributes to spreading + * The subnet prefix is not used as one can not connect to + * same remote port (GUID) using the same remote QPN via two + * different subnets. + */ + /* qpn octets[1:4) & port GUID octets[12:20) */ + u32 *d32 = (u32 *) daddr; + u32 hv; + + hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0); + return hv & htbl->mask; +} + +struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + struct ipoib_neigh *neigh = NULL; + u32 hash_val; + + rcu_read_lock_bh(); + + htbl = rcu_dereference_bh(ntbl->htbl); + + if (!htbl) + goto out_unlock; + + hash_val = ipoib_addr_hash(htbl, daddr); + for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]); + neigh != NULL; + neigh = rcu_dereference_bh(neigh->hnext)) { + if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { + /* found, take one ref on behalf of the caller */ + if (!atomic_inc_not_zero(&neigh->refcnt)) { + /* deleted */ + neigh = NULL; + goto out_unlock; + } + + if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) + neigh->alive = jiffies; + goto out_unlock; + } + } + +out_unlock: + rcu_read_unlock_bh(); + return neigh; +} + +static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) +{ + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + unsigned long neigh_obsolete; + unsigned long dt; + unsigned long flags; + int i; + LIST_HEAD(remove_list); + + spin_lock_irqsave(&priv->lock, flags); + + htbl = rcu_dereference_protected(ntbl->htbl, + lockdep_is_held(&priv->lock)); + + if (!htbl) + goto out_unlock; + + /* neigh is obsolete if it was idle for two GC periods */ + dt = 2 * arp_tbl.gc_interval; + neigh_obsolete = jiffies - dt; + + for (i = 0; i < htbl->size; i++) { + struct ipoib_neigh *neigh; + struct ipoib_neigh __rcu **np = &htbl->buckets[i]; + + while ((neigh = rcu_dereference_protected(*np, + lockdep_is_held(&priv->lock))) != NULL) { + /* was the neigh idle for two GC periods */ + if (time_after(neigh_obsolete, neigh->alive)) { + + ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list); + + rcu_assign_pointer(*np, + rcu_dereference_protected(neigh->hnext, + lockdep_is_held(&priv->lock))); + /* remove from path/mc list */ + list_del_init(&neigh->list); + call_rcu(&neigh->rcu, ipoib_neigh_reclaim); + } else { + np = &neigh->hnext; + } + + } + } + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + ipoib_mcast_remove_list(&remove_list); +} + +static void ipoib_reap_neigh(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, neigh_reap_task.work); + + __ipoib_reap_neigh(priv); + + queue_delayed_work(priv->wq, 
&priv->neigh_reap_task, + arp_tbl.gc_interval); +} + + +static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr, + struct net_device *dev) +{ + struct ipoib_neigh *neigh; + + neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC); + if (!neigh) + return NULL; + + neigh->dev = dev; + memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr)); + skb_queue_head_init(&neigh->queue); + INIT_LIST_HEAD(&neigh->list); + ipoib_cm_set(neigh, NULL); + /* one ref on behalf of the caller */ + atomic_set(&neigh->refcnt, 1); + + return neigh; +} + +struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, + struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + struct ipoib_neigh *neigh; + u32 hash_val; + + htbl = rcu_dereference_protected(ntbl->htbl, + lockdep_is_held(&priv->lock)); + if (!htbl) { + neigh = NULL; + goto out_unlock; + } + + /* need to add a new neigh, but maybe some other thread succeeded? + * recalc hash, maybe hash resize took place so we do a search + */ + hash_val = ipoib_addr_hash(htbl, daddr); + for (neigh = rcu_dereference_protected(htbl->buckets[hash_val], + lockdep_is_held(&priv->lock)); + neigh != NULL; + neigh = rcu_dereference_protected(neigh->hnext, + lockdep_is_held(&priv->lock))) { + if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { + /* found, take one ref on behalf of the caller */ + if (!atomic_inc_not_zero(&neigh->refcnt)) { + /* deleted */ + neigh = NULL; + break; + } + neigh->alive = jiffies; + goto out_unlock; + } + } + + neigh = ipoib_neigh_ctor(daddr, dev); + if (!neigh) + goto out_unlock; + + /* one ref on behalf of the hash table */ + atomic_inc(&neigh->refcnt); + neigh->alive = jiffies; + /* put in hash */ + rcu_assign_pointer(neigh->hnext, + rcu_dereference_protected(htbl->buckets[hash_val], + lockdep_is_held(&priv->lock))); + rcu_assign_pointer(htbl->buckets[hash_val], neigh); + atomic_inc(&ntbl->entries); + +out_unlock: + + return neigh; +} + +void ipoib_neigh_dtor(struct ipoib_neigh *neigh) +{ + /* neigh reference count was dropprd to zero */ + struct net_device *dev = neigh->dev; + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct sk_buff *skb; + if (neigh->ah) + ipoib_put_ah(neigh->ah); + while ((skb = __skb_dequeue(&neigh->queue))) { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + } + if (ipoib_cm_get(neigh)) + ipoib_cm_destroy_tx(ipoib_cm_get(neigh)); + ipoib_dbg(ipoib_priv(dev), + "neigh free for %06x %pI6\n", + IPOIB_QPN(neigh->daddr), + neigh->daddr + 4); + kfree(neigh); + if (atomic_dec_and_test(&priv->ntbl.entries)) { + if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags)) + complete(&priv->ntbl.flushed); + } +} + +static void ipoib_neigh_reclaim(struct rcu_head *rp) +{ + /* Called as a result of removal from hash table */ + struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu); + /* note TX context may hold another ref */ + ipoib_neigh_put(neigh); +} + +void ipoib_neigh_free(struct ipoib_neigh *neigh) +{ + struct net_device *dev = neigh->dev; + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + struct ipoib_neigh __rcu **np; + struct ipoib_neigh *n; + u32 hash_val; + + htbl = rcu_dereference_protected(ntbl->htbl, + lockdep_is_held(&priv->lock)); + if (!htbl) + return; + + hash_val = ipoib_addr_hash(htbl, neigh->daddr); + np = &htbl->buckets[hash_val]; + for (n = rcu_dereference_protected(*np, + lockdep_is_held(&priv->lock)); + n != NULL; + n = 
rcu_dereference_protected(*np, + lockdep_is_held(&priv->lock))) { + if (n == neigh) { + /* found */ + rcu_assign_pointer(*np, + rcu_dereference_protected(neigh->hnext, + lockdep_is_held(&priv->lock))); + /* remove from parent list */ + list_del_init(&neigh->list); + call_rcu(&neigh->rcu, ipoib_neigh_reclaim); + return; + } else { + np = &n->hnext; + } + } +} + +static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) +{ + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + struct ipoib_neigh __rcu **buckets; + u32 size; + + clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); + ntbl->htbl = NULL; + htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); + if (!htbl) + return -ENOMEM; + size = roundup_pow_of_two(arp_tbl.gc_thresh3); + buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL); + if (!buckets) { + kfree(htbl); + return -ENOMEM; + } + htbl->size = size; + htbl->mask = (size - 1); + htbl->buckets = buckets; + RCU_INIT_POINTER(ntbl->htbl, htbl); + htbl->ntbl = ntbl; + atomic_set(&ntbl->entries, 0); + + /* start garbage collection */ + queue_delayed_work(priv->wq, &priv->neigh_reap_task, + arp_tbl.gc_interval); + + return 0; +} + +static void neigh_hash_free_rcu(struct rcu_head *head) +{ + struct ipoib_neigh_hash *htbl = container_of(head, + struct ipoib_neigh_hash, + rcu); + struct ipoib_neigh __rcu **buckets = htbl->buckets; + struct ipoib_neigh_table *ntbl = htbl->ntbl; + + kvfree(buckets); + kfree(htbl); + complete(&ntbl->deleted); +} + +void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + unsigned long flags; + int i; + + /* remove all neigh connected to a given path or mcast */ + spin_lock_irqsave(&priv->lock, flags); + + htbl = rcu_dereference_protected(ntbl->htbl, + lockdep_is_held(&priv->lock)); + + if (!htbl) + goto out_unlock; + + for (i = 0; i < htbl->size; i++) { + struct ipoib_neigh *neigh; + struct ipoib_neigh __rcu **np = &htbl->buckets[i]; + + while ((neigh = rcu_dereference_protected(*np, + lockdep_is_held(&priv->lock))) != NULL) { + /* delete neighs belong to this parent */ + if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { + rcu_assign_pointer(*np, + rcu_dereference_protected(neigh->hnext, + lockdep_is_held(&priv->lock))); + /* remove from parent list */ + list_del_init(&neigh->list); + call_rcu(&neigh->rcu, ipoib_neigh_reclaim); + } else { + np = &neigh->hnext; + } + + } + } +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) +{ + struct ipoib_neigh_table *ntbl = &priv->ntbl; + struct ipoib_neigh_hash *htbl; + unsigned long flags; + int i, wait_flushed = 0; + + init_completion(&priv->ntbl.flushed); + set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); + + spin_lock_irqsave(&priv->lock, flags); + + htbl = rcu_dereference_protected(ntbl->htbl, + lockdep_is_held(&priv->lock)); + if (!htbl) + goto out_unlock; + + wait_flushed = atomic_read(&priv->ntbl.entries); + if (!wait_flushed) + goto free_htbl; + + for (i = 0; i < htbl->size; i++) { + struct ipoib_neigh *neigh; + struct ipoib_neigh __rcu **np = &htbl->buckets[i]; + + while ((neigh = rcu_dereference_protected(*np, + lockdep_is_held(&priv->lock))) != NULL) { + rcu_assign_pointer(*np, + rcu_dereference_protected(neigh->hnext, + lockdep_is_held(&priv->lock))); + /* remove from path/mc list */ + list_del_init(&neigh->list); + call_rcu(&neigh->rcu, ipoib_neigh_reclaim); + } + } + 
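+	/* the hash table is empty now, release it via RCU as well */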
+free_htbl: + rcu_assign_pointer(ntbl->htbl, NULL); + call_rcu(&htbl->rcu, neigh_hash_free_rcu); + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + if (wait_flushed) + wait_for_completion(&priv->ntbl.flushed); +} + +static void ipoib_neigh_hash_uninit(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); + init_completion(&priv->ntbl.deleted); + + cancel_delayed_work_sync(&priv->neigh_reap_task); + + ipoib_flush_neighs(priv); + + wait_for_completion(&priv->ntbl.deleted); +} + +static void ipoib_napi_add(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC); + netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE); +} + +static void ipoib_napi_del(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + netif_napi_del(&priv->recv_napi); + netif_napi_del(&priv->send_napi); +} + +static void ipoib_dev_uninit_default(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_transport_dev_cleanup(dev); + + ipoib_napi_del(dev); + + ipoib_cm_dev_cleanup(dev); + + kfree(priv->rx_ring); + vfree(priv->tx_ring); + + priv->rx_ring = NULL; + priv->tx_ring = NULL; +} + +static int ipoib_dev_init_default(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_napi_add(dev); + + /* Allocate RX/TX "rings" to hold queued skbs */ + priv->rx_ring = kcalloc(ipoib_recvq_size, + sizeof(*priv->rx_ring), + GFP_KERNEL); + if (!priv->rx_ring) + goto out; + + priv->tx_ring = vzalloc(array_size(ipoib_sendq_size, + sizeof(*priv->tx_ring))); + if (!priv->tx_ring) { + pr_warn("%s: failed to allocate TX ring (%d entries)\n", + priv->ca->name, ipoib_sendq_size); + goto out_rx_ring_cleanup; + } + + /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */ + + if (ipoib_transport_dev_init(dev, priv->ca)) { + pr_warn("%s: ipoib_transport_dev_init failed\n", + priv->ca->name); + goto out_tx_ring_cleanup; + } + + /* after qp created set dev address */ + priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; + priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; + priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff; + + return 0; + +out_tx_ring_cleanup: + vfree(priv->tx_ring); + +out_rx_ring_cleanup: + kfree(priv->rx_ring); + +out: + ipoib_napi_del(dev); + return -ENOMEM; +} + +static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (!priv->rn_ops->ndo_do_ioctl) + return -EOPNOTSUPP; + + return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd); +} + +static int ipoib_dev_init(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret = -ENOMEM; + + priv->qp = NULL; + + /* + * the various IPoIB tasks assume they will never race against + * themselves, so always use a single thread workqueue + */ + priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM); + if (!priv->wq) { + pr_warn("%s: failed to allocate device WQ\n", dev->name); + goto out; + } + + /* create pd, which used both for control and datapath*/ + priv->pd = ib_alloc_pd(priv->ca, 0); + if (IS_ERR(priv->pd)) { + pr_warn("%s: failed to allocate PD\n", priv->ca->name); + goto clean_wq; + } + + ret = priv->rn_ops->ndo_init(dev); + if (ret) { + pr_warn("%s failed to init HW resource\n", dev->name); + goto out_free_pd; + } + + ret = ipoib_neigh_hash_init(priv); + if (ret) { + pr_warn("%s 
failed to init neigh hash\n", dev->name); + goto out_dev_uninit; + } + + if (dev->flags & IFF_UP) { + if (ipoib_ib_dev_open(dev)) { + pr_warn("%s failed to open device\n", dev->name); + ret = -ENODEV; + goto out_hash_uninit; + } + } + + return 0; + +out_hash_uninit: + ipoib_neigh_hash_uninit(dev); + +out_dev_uninit: + ipoib_ib_dev_cleanup(dev); + +out_free_pd: + if (priv->pd) { + ib_dealloc_pd(priv->pd); + priv->pd = NULL; + } + +clean_wq: + if (priv->wq) { + destroy_workqueue(priv->wq); + priv->wq = NULL; + } + +out: + return ret; +} + +/* + * This must be called before doing an unregister_netdev on a parent device to + * shutdown the IB event handler. + */ +static void ipoib_parent_unregister_pre(struct net_device *ndev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + + /* + * ipoib_set_mac checks netif_running before pushing work, clearing + * running ensures the it will not add more work. + */ + rtnl_lock(); + dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); + rtnl_unlock(); + + /* ipoib_event() cannot be running once this returns */ + ib_unregister_event_handler(&priv->event_handler); + + /* + * Work on the queue grabs the rtnl lock, so this cannot be done while + * also holding it. + */ + flush_workqueue(ipoib_workqueue); +} + +static void ipoib_set_dev_features(struct ipoib_dev_priv *priv) +{ + priv->hca_caps = priv->ca->attrs.device_cap_flags; + + if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { + priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + if (priv->hca_caps & IB_DEVICE_UD_TSO) + priv->dev->hw_features |= NETIF_F_TSO; + + priv->dev->features |= priv->dev->hw_features; + } +} + +static int ipoib_parent_init(struct net_device *ndev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + struct ib_port_attr attr; + int result; + + result = ib_query_port(priv->ca, priv->port, &attr); + if (result) { + pr_warn("%s: ib_query_port %d failed\n", priv->ca->name, + priv->port); + return result; + } + priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); + + result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey); + if (result) { + pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n", + priv->ca->name, priv->port, result); + return result; + } + + result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid); + if (result) { + pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n", + priv->ca->name, priv->port, result); + return result; + } + memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, + sizeof(union ib_gid)); + + SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent); + priv->dev->dev_port = priv->port - 1; + /* Let's set this one too for backwards compatibility. 
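+	 * Older userspace may still read dev_id instead of dev_port to tell
+	 * the ports of a multi-port HCA apart.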
*/ + priv->dev->dev_id = priv->port - 1; + + return 0; +} + +static void ipoib_child_init(struct net_device *ndev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); + + priv->max_ib_mtu = ppriv->max_ib_mtu; + set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); + memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); + memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid)); +} + +static int ipoib_ndo_init(struct net_device *ndev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + int rc; + + if (priv->parent) { + ipoib_child_init(ndev); + } else { + rc = ipoib_parent_init(ndev); + if (rc) + return rc; + } + + /* MTU will be reset when mcast join happens */ + ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); + priv->mcast_mtu = priv->admin_mtu = ndev->mtu; + ndev->max_mtu = IPOIB_CM_MTU; + + ndev->neigh_priv_len = sizeof(struct ipoib_neigh); + + /* + * Set the full membership bit, so that we join the right + * broadcast group, etc. + */ + priv->pkey |= 0x8000; + + ndev->broadcast[8] = priv->pkey >> 8; + ndev->broadcast[9] = priv->pkey & 0xff; + set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); + + ipoib_set_dev_features(priv); + + rc = ipoib_dev_init(ndev); + if (rc) { + pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n", + priv->ca->name, priv->dev->name, priv->port, rc); + return rc; + } + + if (priv->parent) { + struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); + + dev_hold(priv->parent); + + down_write(&ppriv->vlan_rwsem); + list_add_tail(&priv->list, &ppriv->child_intfs); + up_write(&ppriv->vlan_rwsem); + } + + return 0; +} + +static void ipoib_ndo_uninit(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ASSERT_RTNL(); + + /* + * ipoib_remove_one guarantees the children are removed before the + * parent, and that is the only place where a parent can be removed. 
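+ * By the time a parent gets here its child_intfs list must therefore be
+ * empty, which the WARN_ON below asserts.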
+ */ + WARN_ON(!list_empty(&priv->child_intfs)); + + if (priv->parent) { + struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); + + down_write(&ppriv->vlan_rwsem); + list_del(&priv->list); + up_write(&ppriv->vlan_rwsem); + } + + ipoib_neigh_hash_uninit(dev); + + ipoib_ib_dev_cleanup(dev); + + /* no more works over the priv->wq */ + if (priv->wq) { + /* See ipoib_mcast_carrier_on_task() */ + WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)); + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); + priv->wq = NULL; + } + + if (priv->parent) + dev_put(priv->parent); +} + +static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state); +} + +static int ipoib_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivf) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int err; + + err = ib_get_vf_config(priv->ca, vf, priv->port, ivf); + if (err) + return err; + + ivf->vf = vf; + memcpy(ivf->mac, dev->dev_addr, dev->addr_len); + + return 0; +} + +static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID) + return -EINVAL; + + return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type); +} + +static int ipoib_get_vf_stats(struct net_device *dev, int vf, + struct ifla_vf_stats *vf_stats) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats); +} + +static const struct header_ops ipoib_header_ops = { + .create = ipoib_hard_header, +}; + +static const struct net_device_ops ipoib_netdev_ops_pf = { + .ndo_init = ipoib_ndo_init, + .ndo_uninit = ipoib_ndo_uninit, + .ndo_open = ipoib_open, + .ndo_stop = ipoib_stop, + .ndo_change_mtu = ipoib_change_mtu, + .ndo_fix_features = ipoib_fix_features, + .ndo_start_xmit = ipoib_start_xmit, + .ndo_tx_timeout = ipoib_timeout, + .ndo_set_rx_mode = ipoib_set_mcast_list, + .ndo_get_iflink = ipoib_get_iflink, + .ndo_set_vf_link_state = ipoib_set_vf_link_state, + .ndo_get_vf_config = ipoib_get_vf_config, + .ndo_get_vf_stats = ipoib_get_vf_stats, + .ndo_set_vf_guid = ipoib_set_vf_guid, + .ndo_set_mac_address = ipoib_set_mac, + .ndo_get_stats64 = ipoib_get_stats, + .ndo_do_ioctl = ipoib_ioctl, +}; + +static const struct net_device_ops ipoib_netdev_ops_vf = { + .ndo_init = ipoib_ndo_init, + .ndo_uninit = ipoib_ndo_uninit, + .ndo_open = ipoib_open, + .ndo_stop = ipoib_stop, + .ndo_change_mtu = ipoib_change_mtu, + .ndo_fix_features = ipoib_fix_features, + .ndo_start_xmit = ipoib_start_xmit, + .ndo_tx_timeout = ipoib_timeout, + .ndo_set_rx_mode = ipoib_set_mcast_list, + .ndo_get_iflink = ipoib_get_iflink, + .ndo_get_stats64 = ipoib_get_stats, + .ndo_do_ioctl = ipoib_ioctl, +}; + +void ipoib_setup_common(struct net_device *dev) +{ + dev->header_ops = &ipoib_header_ops; + + ipoib_set_ethtool_ops(dev); + + dev->watchdog_timeo = HZ; + + dev->flags |= IFF_BROADCAST | IFF_MULTICAST; + + dev->hard_header_len = IPOIB_HARD_LEN; + dev->addr_len = INFINIBAND_ALEN; + dev->type = ARPHRD_INFINIBAND; + dev->tx_queue_len = ipoib_sendq_size * 2; + dev->features = (NETIF_F_VLAN_CHALLENGED | + NETIF_F_HIGHDMA); + netif_keep_dst(dev); + + memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); + + /* + * unregister_netdev always frees the netdev, we use this mode + * consistently to unify all the various unregister paths, 
including + * those connected to rtnl_link_ops which require it. + */ + dev->needs_free_netdev = true; +} + +static void ipoib_build_priv(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + priv->dev = dev; + spin_lock_init(&priv->lock); + init_rwsem(&priv->vlan_rwsem); + mutex_init(&priv->mcast_mutex); + + INIT_LIST_HEAD(&priv->path_list); + INIT_LIST_HEAD(&priv->child_intfs); + INIT_LIST_HEAD(&priv->dead_ahs); + INIT_LIST_HEAD(&priv->multicast_list); + + INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); + INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); + INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); + INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal); + INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy); + INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); + INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); + INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh); +} + +static const struct net_device_ops ipoib_netdev_default_pf = { + .ndo_init = ipoib_dev_init_default, + .ndo_uninit = ipoib_dev_uninit_default, + .ndo_open = ipoib_ib_dev_open_default, + .ndo_stop = ipoib_ib_dev_stop_default, +}; + +static struct net_device +*ipoib_create_netdev_default(struct ib_device *hca, + const char *name, + unsigned char name_assign_type, + void (*setup)(struct net_device *)) +{ + struct net_device *dev; + struct rdma_netdev *rn; + + dev = alloc_netdev((int)sizeof(struct rdma_netdev), + name, + name_assign_type, setup); + if (!dev) + return NULL; + + rn = netdev_priv(dev); + + rn->send = ipoib_send; + rn->attach_mcast = ipoib_mcast_attach; + rn->detach_mcast = ipoib_mcast_detach; + rn->hca = hca; + dev->netdev_ops = &ipoib_netdev_default_pf; + + return dev; +} + +static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port, + const char *name) +{ + struct net_device *dev; + + if (hca->alloc_rdma_netdev) { + dev = hca->alloc_rdma_netdev(hca, port, + RDMA_NETDEV_IPOIB, name, + NET_NAME_UNKNOWN, + ipoib_setup_common); + if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP) + return NULL; + } + + if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP) + dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN, + ipoib_setup_common); + + return dev; +} + +struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port, + const char *name) +{ + struct net_device *dev; + struct ipoib_dev_priv *priv; + struct rdma_netdev *rn; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + priv->ca = hca; + priv->port = port; + + dev = ipoib_get_netdev(hca, port, name); + if (!dev) + goto free_priv; + + priv->rn_ops = dev->netdev_ops; + + /* fixme : should be after the query_cap */ + if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION) + dev->netdev_ops = &ipoib_netdev_ops_vf; + else + dev->netdev_ops = &ipoib_netdev_ops_pf; + + rn = netdev_priv(dev); + rn->clnt_priv = priv; + + /* + * Only the child register_netdev flows can handle priv_destructor + * being set, so we force it to NULL here and handle manually until it + * is safe to turn on. 
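+ *
+ * The destructor is re-armed as ipoib_intf_free() (which chains to the
+ * saved next_priv_destructor): ipoib_add_port() sets it only after
+ * register_netdev() succeeds, while __ipoib_vlan_add() sets it just
+ * before calling register_netdevice().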
+ */ + priv->next_priv_destructor = dev->priv_destructor; + dev->priv_destructor = NULL; + + ipoib_build_priv(dev); + + return priv; +free_priv: + kfree(priv); + return NULL; +} + +void ipoib_intf_free(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + + dev->priv_destructor = priv->next_priv_destructor; + if (dev->priv_destructor) + dev->priv_destructor(dev); + + /* + * There are some error flows around register_netdev failing that may + * attempt to call priv_destructor twice, prevent that from happening. + */ + dev->priv_destructor = NULL; + + /* unregister/destroy is very complicated. Make bugs more obvious. */ + rn->clnt_priv = NULL; + + kfree(priv); +} + +static ssize_t show_pkey(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = to_net_dev(dev); + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + + return sprintf(buf, "0x%04x\n", priv->pkey); +} +static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); + +static ssize_t show_umcast(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = to_net_dev(dev); + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + + return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); +} + +void ipoib_set_umcast(struct net_device *ndev, int umcast_val) +{ + struct ipoib_dev_priv *priv = ipoib_priv(ndev); + + if (umcast_val > 0) { + set_bit(IPOIB_FLAG_UMCAST, &priv->flags); + ipoib_warn(priv, "ignoring multicast groups joined directly " + "by userspace\n"); + } else + clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); +} + +static ssize_t set_umcast(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long umcast_val = simple_strtoul(buf, NULL, 0); + + ipoib_set_umcast(to_net_dev(dev), umcast_val); + + return count; +} +static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast); + +int ipoib_add_umcast_attr(struct net_device *dev) +{ + return device_create_file(&dev->dev, &dev_attr_umcast); +} + +static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) +{ + struct ipoib_dev_priv *child_priv; + struct net_device *netdev = priv->dev; + + netif_addr_lock_bh(netdev); + + memcpy(&priv->local_gid.global.interface_id, + &gid->global.interface_id, + sizeof(gid->global.interface_id)); + memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); + clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); + + netif_addr_unlock_bh(netdev); + + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + down_read(&priv->vlan_rwsem); + list_for_each_entry(child_priv, &priv->child_intfs, list) + set_base_guid(child_priv, gid); + up_read(&priv->vlan_rwsem); + } +} + +static int ipoib_check_lladdr(struct net_device *dev, + struct sockaddr_storage *ss) +{ + union ib_gid *gid = (union ib_gid *)(ss->__data + 4); + int ret = 0; + + netif_addr_lock_bh(dev); + + /* Make sure the QPN, reserved and subnet prefix match the current + * lladdr, it also makes sure the lladdr is unicast. 
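+ *
+ * The 20-byte hardware address is a 4-byte QPN field followed by the
+ * 16-byte port GID; only the final 8 bytes (the GID interface ID) may
+ * differ here, and they must not be zero.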
+ */ + if (memcmp(dev->dev_addr, ss->__data, + 4 + sizeof(gid->global.subnet_prefix)) || + gid->global.interface_id == 0) + ret = -EINVAL; + + netif_addr_unlock_bh(dev); + + return ret; +} + +static int ipoib_set_mac(struct net_device *dev, void *addr) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct sockaddr_storage *ss = addr; + int ret; + + if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) + return -EBUSY; + + ret = ipoib_check_lladdr(dev, ss); + if (ret) + return ret; + + set_base_guid(priv, (union ib_gid *)(ss->__data + 4)); + + queue_work(ipoib_workqueue, &priv->flush_light); + + return 0; +} + +static ssize_t create_child(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int pkey; + int ret; + + if (sscanf(buf, "%i", &pkey) != 1) + return -EINVAL; + + if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000) + return -EINVAL; + + ret = ipoib_vlan_add(to_net_dev(dev), pkey); + + return ret ? ret : count; +} +static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child); + +static ssize_t delete_child(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int pkey; + int ret; + + if (sscanf(buf, "%i", &pkey) != 1) + return -EINVAL; + + if (pkey < 0 || pkey > 0xffff) + return -EINVAL; + + ret = ipoib_vlan_delete(to_net_dev(dev), pkey); + + return ret ? ret : count; + +} +static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child); + +int ipoib_add_pkey_attr(struct net_device *dev) +{ + return device_create_file(&dev->dev, &dev_attr_pkey); +} + +static struct net_device *ipoib_add_port(const char *format, + struct ib_device *hca, u8 port) +{ + struct ipoib_dev_priv *priv; + struct net_device *ndev; + int result; + + priv = ipoib_intf_alloc(hca, port, format); + if (!priv) { + pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port); + return ERR_PTR(-ENOMEM); + } + ndev = priv->dev; + + INIT_IB_EVENT_HANDLER(&priv->event_handler, + priv->ca, ipoib_event); + ib_register_event_handler(&priv->event_handler); + + /* call event handler to ensure pkey in sync */ + queue_work(ipoib_workqueue, &priv->flush_heavy); + + result = register_netdev(ndev); + if (result) { + pr_warn("%s: couldn't register ipoib port %d; error %d\n", + hca->name, port, result); + + ipoib_parent_unregister_pre(ndev); + ipoib_intf_free(ndev); + free_netdev(ndev); + + return ERR_PTR(result); + } + + /* + * We cannot set priv_destructor before register_netdev because we + * need priv to be always valid during the error flow to execute + * ipoib_parent_unregister_pre(). Instead handle it manually and only + * enter priv_destructor mode once we are completely registered. 
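+ *
+ * Until then, the error path above tears the interface down by hand
+ * with ipoib_parent_unregister_pre(), ipoib_intf_free() and
+ * free_netdev().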
+ */ + ndev->priv_destructor = ipoib_intf_free; + + if (ipoib_cm_add_mode_attr(ndev)) + goto sysfs_failed; + if (ipoib_add_pkey_attr(ndev)) + goto sysfs_failed; + if (ipoib_add_umcast_attr(ndev)) + goto sysfs_failed; + if (device_create_file(&ndev->dev, &dev_attr_create_child)) + goto sysfs_failed; + if (device_create_file(&ndev->dev, &dev_attr_delete_child)) + goto sysfs_failed; + + return ndev; + +sysfs_failed: + ipoib_parent_unregister_pre(ndev); + unregister_netdev(ndev); + return ERR_PTR(-ENOMEM); +} + +static void ipoib_add_one(struct ib_device *device) +{ + struct list_head *dev_list; + struct net_device *dev; + struct ipoib_dev_priv *priv; + int p; + int count = 0; + + dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL); + if (!dev_list) + return; + + INIT_LIST_HEAD(dev_list); + + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { + if (!rdma_protocol_ib(device, p)) + continue; + dev = ipoib_add_port("ib%d", device, p); + if (!IS_ERR(dev)) { + priv = ipoib_priv(dev); + list_add_tail(&priv->list, dev_list); + count++; + } + } + + if (!count) { + kfree(dev_list); + return; + } + + ib_set_client_data(device, &ipoib_client, dev_list); +} + +static void ipoib_remove_one(struct ib_device *device, void *client_data) +{ + struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; + struct list_head *dev_list = client_data; + + if (!dev_list) + return; + + list_for_each_entry_safe(priv, tmp, dev_list, list) { + LIST_HEAD(head); + ipoib_parent_unregister_pre(priv->dev); + + rtnl_lock(); + + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, + list) + unregister_netdevice_queue(cpriv->dev, &head); + unregister_netdevice_queue(priv->dev, &head); + unregister_netdevice_many(&head); + + rtnl_unlock(); + } + + kfree(dev_list); +} + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +static struct notifier_block ipoib_netdev_notifier = { + .notifier_call = ipoib_netdev_event, +}; +#endif + +static int __init ipoib_init_module(void) +{ + int ret; + + ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size); + ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE); + ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE); + + ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); + ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); + ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); +#ifdef CONFIG_INFINIBAND_IPOIB_CM + ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); + ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); +#endif + + /* + * When copying small received packets, we only copy from the + * linear data part of the SKB, so we rely on this condition. + */ + BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE); + + ret = ipoib_register_debugfs(); + if (ret) + return ret; + + /* + * We create a global workqueue here that is used for all flush + * operations. However, if you attempt to flush a workqueue + * from a task on that same workqueue, it deadlocks the system. + * We want to be able to flush the tasks associated with a + * specific net device, so we also create a workqueue for each + * netdevice. We queue up the tasks for that device only on + * its private workqueue, and we only queue up flush events + * on our global flush workqueue. This avoids the deadlocks. 
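+ *
+ * Concretely: the flush_light/flush_normal/flush_heavy works are queued
+ * on this global ipoib_workqueue, while per-device work such as the
+ * multicast join and carrier-on tasks runs on that device's priv->wq.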
+ */ + ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0); + if (!ipoib_workqueue) { + ret = -ENOMEM; + goto err_fs; + } + + ib_sa_register_client(&ipoib_sa_client); + + ret = ib_register_client(&ipoib_client); + if (ret) + goto err_sa; + + ret = ipoib_netlink_init(); + if (ret) + goto err_client; + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG + register_netdevice_notifier(&ipoib_netdev_notifier); +#endif + return 0; + +err_client: + ib_unregister_client(&ipoib_client); + +err_sa: + ib_sa_unregister_client(&ipoib_sa_client); + destroy_workqueue(ipoib_workqueue); + +err_fs: + ipoib_unregister_debugfs(); + + return ret; +} + +static void __exit ipoib_cleanup_module(void) +{ +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG + unregister_netdevice_notifier(&ipoib_netdev_notifier); +#endif + ipoib_netlink_fini(); + ib_unregister_client(&ipoib_client); + ib_sa_unregister_client(&ipoib_sa_client); + ipoib_unregister_debugfs(); + destroy_workqueue(ipoib_workqueue); +} + +module_init(ipoib_init_module); +module_exit(ipoib_cleanup_module); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c new file mode 100644 index 000000000..b9e9562f5 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -0,0 +1,1065 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/moduleparam.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/igmp.h> +#include <linux/inetdevice.h> +#include <linux/delay.h> +#include <linux/completion.h> +#include <linux/slab.h> + +#include <net/dst.h> + +#include "ipoib.h" + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG +static int mcast_debug_level; + +module_param(mcast_debug_level, int, 0644); +MODULE_PARM_DESC(mcast_debug_level, + "Enable multicast debug tracing if > 0"); +#endif + +struct ipoib_mcast_iter { + struct net_device *dev; + union ib_gid mgid; + unsigned long created; + unsigned int queuelen; + unsigned int complete; + unsigned int send_only; +}; + +/* join state that allows creating mcg with sendonly member request */ +#define SENDONLY_FULLMEMBER_JOIN 8 + +/* + * This should be called with the priv->lock held + */ +static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, + struct ipoib_mcast *mcast, + bool delay) +{ + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + return; + + /* + * We will be scheduling *something*, so cancel whatever is + * currently scheduled first + */ + cancel_delayed_work(&priv->mcast_task); + if (mcast && delay) { + /* + * We had a failure and want to schedule a retry later + */ + mcast->backoff *= 2; + if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) + mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + mcast->delay_until = jiffies + (mcast->backoff * HZ); + /* + * Mark this mcast for its delay, but restart the + * task immediately. The join task will make sure to + * clear out all entries without delays, and then + * schedule itself to run again when the earliest + * delay expires + */ + queue_delayed_work(priv->wq, &priv->mcast_task, 0); + } else if (delay) { + /* + * Special case of retrying after a failure to + * allocate the broadcast multicast group, wait + * 1 second and try again + */ + queue_delayed_work(priv->wq, &priv->mcast_task, HZ); + } else + queue_delayed_work(priv->wq, &priv->mcast_task, 0); +} + +static void ipoib_mcast_free(struct ipoib_mcast *mcast) +{ + struct net_device *dev = mcast->dev; + int tx_dropped = 0; + + ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n", + mcast->mcmember.mgid.raw); + + /* remove all neigh connected to this mcast */ + ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw); + + if (mcast->ah) + ipoib_put_ah(mcast->ah); + + while (!skb_queue_empty(&mcast->pkt_queue)) { + ++tx_dropped; + dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); + } + + netif_tx_lock_bh(dev); + dev->stats.tx_dropped += tx_dropped; + netif_tx_unlock_bh(dev); + + kfree(mcast); +} + +static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, + int can_sleep) +{ + struct ipoib_mcast *mcast; + + mcast = kzalloc(sizeof(*mcast), can_sleep ? 
GFP_KERNEL : GFP_ATOMIC); + if (!mcast) + return NULL; + + mcast->dev = dev; + mcast->created = jiffies; + mcast->delay_until = jiffies; + mcast->backoff = 1; + + INIT_LIST_HEAD(&mcast->list); + INIT_LIST_HEAD(&mcast->neigh_list); + skb_queue_head_init(&mcast->pkt_queue); + + return mcast; +} + +static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rb_node *n = priv->multicast_tree.rb_node; + + while (n) { + struct ipoib_mcast *mcast; + int ret; + + mcast = rb_entry(n, struct ipoib_mcast, rb_node); + + ret = memcmp(mgid, mcast->mcmember.mgid.raw, + sizeof (union ib_gid)); + if (ret < 0) + n = n->rb_left; + else if (ret > 0) + n = n->rb_right; + else + return mcast; + } + + return NULL; +} + +static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL; + + while (*n) { + struct ipoib_mcast *tmcast; + int ret; + + pn = *n; + tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); + + ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, + sizeof (union ib_gid)); + if (ret < 0) + n = &pn->rb_left; + else if (ret > 0) + n = &pn->rb_right; + else + return -EEXIST; + } + + rb_link_node(&mcast->rb_node, pn, n); + rb_insert_color(&mcast->rb_node, &priv->multicast_tree); + + return 0; +} + +static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, + struct ib_sa_mcmember_rec *mcmember) +{ + struct net_device *dev = mcast->dev; + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + struct ipoib_ah *ah; + struct rdma_ah_attr av; + int ret; + int set_qkey = 0; + + mcast->mcmember = *mcmember; + + /* Set the multicast MTU and cached Q_Key before we attach if it's + * the broadcast group. 
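+ *
+ * The cached Q_Key is what rn->attach_mcast() (ipoib_mcast_attach() for
+ * the default netdev) programs into the UD QP when set_qkey is passed.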
+ */ + if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, + sizeof (union ib_gid))) { + spin_lock_irq(&priv->lock); + if (!priv->broadcast) { + spin_unlock_irq(&priv->lock); + return -EAGAIN; + } + /*update priv member according to the new mcast*/ + priv->broadcast->mcmember.qkey = mcmember->qkey; + priv->broadcast->mcmember.mtu = mcmember->mtu; + priv->broadcast->mcmember.traffic_class = mcmember->traffic_class; + priv->broadcast->mcmember.rate = mcmember->rate; + priv->broadcast->mcmember.sl = mcmember->sl; + priv->broadcast->mcmember.flow_label = mcmember->flow_label; + priv->broadcast->mcmember.hop_limit = mcmember->hop_limit; + /* assume if the admin and the mcast are the same both can be changed */ + if (priv->mcast_mtu == priv->admin_mtu) + priv->admin_mtu = + priv->mcast_mtu = + IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); + else + priv->mcast_mtu = + IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); + + priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); + spin_unlock_irq(&priv->lock); + priv->tx_wr.remote_qkey = priv->qkey; + set_qkey = 1; + } + + if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + ipoib_warn(priv, "multicast group %pI6 already attached\n", + mcast->mcmember.mgid.raw); + + return 0; + } + + ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid, + be16_to_cpu(mcast->mcmember.mlid), + set_qkey, priv->qkey); + if (ret < 0) { + ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n", + mcast->mcmember.mgid.raw); + + clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags); + return ret; + } + } + + memset(&av, 0, sizeof(av)); + av.type = rdma_ah_find_type(priv->ca, priv->port); + rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid)), + rdma_ah_set_port_num(&av, priv->port); + rdma_ah_set_sl(&av, mcast->mcmember.sl); + rdma_ah_set_static_rate(&av, mcast->mcmember.rate); + + rdma_ah_set_grh(&av, &mcast->mcmember.mgid, + be32_to_cpu(mcast->mcmember.flow_label), + 0, mcast->mcmember.hop_limit, + mcast->mcmember.traffic_class); + + ah = ipoib_create_ah(dev, priv->pd, &av); + if (IS_ERR(ah)) { + ipoib_warn(priv, "ib_address_create failed %ld\n", + -PTR_ERR(ah)); + /* use original error */ + return PTR_ERR(ah); + } + spin_lock_irq(&priv->lock); + mcast->ah = ah; + spin_unlock_irq(&priv->lock); + + ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n", + mcast->mcmember.mgid.raw, + mcast->ah->ah, + be16_to_cpu(mcast->mcmember.mlid), + mcast->mcmember.sl); + + /* actually send any queued packets */ + netif_tx_lock_bh(dev); + while (!skb_queue_empty(&mcast->pkt_queue)) { + struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); + + netif_tx_unlock_bh(dev); + + skb->dev = dev; + + ret = dev_queue_xmit(skb); + if (ret) + ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n", + __func__, ret); + netif_tx_lock_bh(dev); + } + netif_tx_unlock_bh(dev); + + return 0; +} + +void ipoib_mcast_carrier_on_task(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, + carrier_on_task); + struct ib_port_attr attr; + + if (ib_query_port(priv->ca, priv->port, &attr) || + attr.state != IB_PORT_ACTIVE) { + ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); + return; + } + /* + * Check if can send sendonly MCG's with sendonly-fullmember join state. 
+ * It done here after the successfully join to the broadcast group, + * because the broadcast group must always be joined first and is always + * re-joined if the SM changes substantially. + */ + priv->sm_fullmember_sendonly_support = + ib_sa_sendonly_fullmem_support(&ipoib_sa_client, + priv->ca, priv->port); + /* + * Take rtnl_lock to avoid racing with ipoib_stop() and + * turning the carrier back on while a device is being + * removed. However, ipoib_stop() will attempt to flush + * the workqueue while holding the rtnl lock, so loop + * on trylock until either we get the lock or we see + * FLAG_OPER_UP go away as that signals that we are bailing + * and can safely ignore the carrier on work. + */ + while (!rtnl_trylock()) { + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + return; + else + msleep(20); + } + if (!ipoib_cm_admin_enabled(priv->dev)) + dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); + netif_carrier_on(priv->dev); + rtnl_unlock(); +} + +static int ipoib_mcast_join_complete(int status, + struct ib_sa_multicast *multicast) +{ + struct ipoib_mcast *mcast = multicast->context; + struct net_device *dev = mcast->dev; + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n", + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? + "sendonly " : "", + mcast->mcmember.mgid.raw, status); + + /* We trap for port events ourselves. */ + if (status == -ENETRESET) { + status = 0; + goto out; + } + + if (!status) + status = ipoib_mcast_join_finish(mcast, &multicast->rec); + + if (!status) { + mcast->backoff = 1; + mcast->delay_until = jiffies; + + /* + * Defer carrier on work to priv->wq to avoid a + * deadlock on rtnl_lock here. Requeue our multicast + * work too, which will end up happening right after + * our carrier on task work and will allow us to + * send out all of the non-broadcast joins + */ + if (mcast == priv->broadcast) { + spin_lock_irq(&priv->lock); + queue_work(priv->wq, &priv->carrier_on_task); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + goto out_locked; + } + } else { + bool silent_fail = + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && + status == -EINVAL; + + if (mcast->logcount < 20) { + if (status == -ETIMEDOUT || status == -EAGAIN || + silent_fail) { + ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n", + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", + mcast->mcmember.mgid.raw, status); + } else { + ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n", + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", + mcast->mcmember.mgid.raw, status); + } + + if (!silent_fail) + mcast->logcount++; + } + + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && + mcast->backoff >= 2) { + /* + * We only retry sendonly joins once before we drop + * the packet and quit trying to deal with the + * group. However, we leave the group in the + * mcast list as an unjoined group. If we want to + * try joining again, we simply queue up a packet + * and restart the join thread. The empty queue + * is why the join thread ignores this group. 
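+ *
+ * (ipoib_mcast_join_task() only retries a send-only group while its
+ * pkt_queue is non-empty.)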
+ */ + mcast->backoff = 1; + netif_tx_lock_bh(dev); + while (!skb_queue_empty(&mcast->pkt_queue)) { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); + } + netif_tx_unlock_bh(dev); + } else { + spin_lock_irq(&priv->lock); + /* Requeue this join task with a backoff delay */ + __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + goto out_locked; + } + } +out: + spin_lock_irq(&priv->lock); +out_locked: + /* + * Make sure to set mcast->mc before we clear the busy flag to avoid + * racing with code that checks for BUSY before checking mcast->mc + */ + if (status) + mcast->mc = NULL; + else + mcast->mc = multicast; + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + spin_unlock_irq(&priv->lock); + complete(&mcast->done); + + return status; +} + +/* + * Caller must hold 'priv->lock' + */ +static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_sa_multicast *multicast; + struct ib_sa_mcmember_rec rec = { + .join_state = 1 + }; + ib_sa_comp_mask comp_mask; + int ret = 0; + + if (!priv->broadcast || + !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + return -EINVAL; + + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + + ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); + + rec.mgid = mcast->mcmember.mgid; + rec.port_gid = priv->local_gid; + rec.pkey = cpu_to_be16(priv->pkey); + + comp_mask = + IB_SA_MCMEMBER_REC_MGID | + IB_SA_MCMEMBER_REC_PORT_GID | + IB_SA_MCMEMBER_REC_PKEY | + IB_SA_MCMEMBER_REC_JOIN_STATE; + + if (mcast != priv->broadcast) { + /* + * RFC 4391: + * The MGID MUST use the same P_Key, Q_Key, SL, MTU, + * and HopLimit as those used in the broadcast-GID. The rest + * of attributes SHOULD follow the values used in the + * broadcast-GID as well. + */ + comp_mask |= + IB_SA_MCMEMBER_REC_QKEY | + IB_SA_MCMEMBER_REC_MTU_SELECTOR | + IB_SA_MCMEMBER_REC_MTU | + IB_SA_MCMEMBER_REC_TRAFFIC_CLASS | + IB_SA_MCMEMBER_REC_RATE_SELECTOR | + IB_SA_MCMEMBER_REC_RATE | + IB_SA_MCMEMBER_REC_SL | + IB_SA_MCMEMBER_REC_FLOW_LABEL | + IB_SA_MCMEMBER_REC_HOP_LIMIT; + + rec.qkey = priv->broadcast->mcmember.qkey; + rec.mtu_selector = IB_SA_EQ; + rec.mtu = priv->broadcast->mcmember.mtu; + rec.traffic_class = priv->broadcast->mcmember.traffic_class; + rec.rate_selector = IB_SA_EQ; + rec.rate = priv->broadcast->mcmember.rate; + rec.sl = priv->broadcast->mcmember.sl; + rec.flow_label = priv->broadcast->mcmember.flow_label; + rec.hop_limit = priv->broadcast->mcmember.hop_limit; + + /* + * Send-only IB Multicast joins work at the core IB layer but + * require specific SM support. + * We can use such joins here only if the current SM supports that feature. + * However, if not, we emulate an Ethernet multicast send, + * which does not require a multicast subscription and will + * still send properly. The most appropriate thing to + * do is to create the group if it doesn't exist as that + * most closely emulates the behavior, from a user space + * application perspective, of Ethernet multicast operation. 
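+ *
+ * sm_fullmember_sendonly_support is probed in
+ * ipoib_mcast_carrier_on_task() once the broadcast join has completed.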
+ */ + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && + priv->sm_fullmember_sendonly_support) + /* SM supports sendonly-fullmember, otherwise fallback to full-member */ + rec.join_state = SENDONLY_FULLMEMBER_JOIN; + } + spin_unlock_irq(&priv->lock); + + multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, + &rec, comp_mask, GFP_KERNEL, + ipoib_mcast_join_complete, mcast); + spin_lock_irq(&priv->lock); + if (IS_ERR(multicast)) { + ret = PTR_ERR(multicast); + ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); + /* Requeue this join task with a backoff delay */ + __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + spin_unlock_irq(&priv->lock); + complete(&mcast->done); + spin_lock_irq(&priv->lock); + } + return 0; +} + +void ipoib_mcast_join_task(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, mcast_task.work); + struct net_device *dev = priv->dev; + struct ib_port_attr port_attr; + unsigned long delay_until = 0; + struct ipoib_mcast *mcast = NULL; + + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + return; + + if (ib_query_port(priv->ca, priv->port, &port_attr)) { + ipoib_dbg(priv, "ib_query_port() failed\n"); + return; + } + if (port_attr.state != IB_PORT_ACTIVE) { + ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n", + port_attr.state); + return; + } + priv->local_lid = port_attr.lid; + netif_addr_lock_bh(dev); + + if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { + netif_addr_unlock_bh(dev); + return; + } + netif_addr_unlock_bh(dev); + + spin_lock_irq(&priv->lock); + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + goto out; + + if (!priv->broadcast) { + struct ipoib_mcast *broadcast; + + broadcast = ipoib_mcast_alloc(dev, 0); + if (!broadcast) { + ipoib_warn(priv, "failed to allocate broadcast group\n"); + /* + * Restart us after a 1 second delay to retry + * creating our broadcast group and attaching to + * it. Until this succeeds, this ipoib dev is + * completely stalled (multicast wise). 
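+ *
+ * (Passing delay with no mcast maps to the fixed one-second requeue
+ * in __ipoib_mcast_schedule_join_thread().)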
+ */ + __ipoib_mcast_schedule_join_thread(priv, NULL, 1); + goto out; + } + + memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, + sizeof (union ib_gid)); + priv->broadcast = broadcast; + + __ipoib_mcast_add(dev, priv->broadcast); + } + + if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { + if (IS_ERR_OR_NULL(priv->broadcast->mc) && + !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) { + mcast = priv->broadcast; + if (mcast->backoff > 1 && + time_before(jiffies, mcast->delay_until)) { + delay_until = mcast->delay_until; + mcast = NULL; + } + } + goto out; + } + + /* + * We'll never get here until the broadcast group is both allocated + * and attached + */ + list_for_each_entry(mcast, &priv->multicast_list, list) { + if (IS_ERR_OR_NULL(mcast->mc) && + !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && + (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) || + !skb_queue_empty(&mcast->pkt_queue))) { + if (mcast->backoff == 1 || + time_after_eq(jiffies, mcast->delay_until)) { + /* Found the next unjoined group */ + if (ipoib_mcast_join(dev, mcast)) { + spin_unlock_irq(&priv->lock); + return; + } + } else if (!delay_until || + time_before(mcast->delay_until, delay_until)) + delay_until = mcast->delay_until; + } + } + + mcast = NULL; + ipoib_dbg_mcast(priv, "successfully started all multicast joins\n"); + +out: + if (delay_until) { + cancel_delayed_work(&priv->mcast_task); + queue_delayed_work(priv->wq, &priv->mcast_task, + delay_until - jiffies); + } + if (mcast) + ipoib_mcast_join(dev, mcast); + + spin_unlock_irq(&priv->lock); +} + +void ipoib_mcast_start_thread(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + unsigned long flags; + + ipoib_dbg_mcast(priv, "starting multicast thread\n"); + + spin_lock_irqsave(&priv->lock, flags); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + spin_unlock_irqrestore(&priv->lock, flags); +} + +int ipoib_mcast_stop_thread(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + ipoib_dbg_mcast(priv, "stopping multicast thread\n"); + + cancel_delayed_work_sync(&priv->mcast_task); + + return 0; +} + +static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + int ret = 0; + + if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n"); + + if (!IS_ERR_OR_NULL(mcast->mc)) + ib_sa_free_multicast(mcast->mc); + + if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + ipoib_dbg_mcast(priv, "leaving MGID %pI6\n", + mcast->mcmember.mgid.raw); + + /* Remove ourselves from the multicast group */ + ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid, + be16_to_cpu(mcast->mcmember.mlid)); + if (ret) + ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); + } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + ipoib_dbg(priv, "leaving with no mcmember but not a " + "SENDONLY join\n"); + + return 0; +} + +/* + * Check if the multicast group is sendonly. If so remove it from the maps + * and add to the remove list + */ +void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid, + struct list_head *remove_list) +{ + /* Is this multicast ? 
*/ + if (*mgid == 0xff) { + struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid); + + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, remove_list); + } + } +} + +void ipoib_mcast_remove_list(struct list_head *remove_list) +{ + struct ipoib_mcast *mcast, *tmcast; + + /* + * make sure the in-flight joins have finished before we attempt + * to leave + */ + list_for_each_entry_safe(mcast, tmcast, remove_list, list) + if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + wait_for_completion(&mcast->done); + + list_for_each_entry_safe(mcast, tmcast, remove_list, list) { + ipoib_mcast_leave(mcast->dev, mcast); + ipoib_mcast_free(mcast); + } +} + +void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct rdma_netdev *rn = netdev_priv(dev); + struct ipoib_mcast *mcast; + unsigned long flags; + void *mgid = daddr + 4; + + spin_lock_irqsave(&priv->lock, flags); + + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || + !priv->broadcast || + !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + goto unlock; + } + + mcast = __ipoib_mcast_find(dev, mgid); + if (!mcast || !mcast->ah) { + if (!mcast) { + /* Let's create a new send only group now */ + ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n", + mgid); + + mcast = ipoib_mcast_alloc(dev, 0); + if (!mcast) { + ipoib_warn(priv, "unable to allocate memory " + "for multicast structure\n"); + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + goto unlock; + } + + set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); + memcpy(mcast->mcmember.mgid.raw, mgid, + sizeof (union ib_gid)); + __ipoib_mcast_add(dev, mcast); + list_add_tail(&mcast->list, &priv->multicast_list); + } + if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) { + /* put pseudoheader back on for next time */ + skb_push(skb, sizeof(struct ipoib_pseudo_header)); + skb_queue_tail(&mcast->pkt_queue, skb); + } else { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + } + if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + } + } else { + struct ipoib_neigh *neigh; + + spin_unlock_irqrestore(&priv->lock, flags); + neigh = ipoib_neigh_get(dev, daddr); + spin_lock_irqsave(&priv->lock, flags); + if (!neigh) { + neigh = ipoib_neigh_alloc(daddr, dev); + /* Make sure that the neigh will be added only + * once to mcast list. 
+ */ + if (neigh && list_empty(&neigh->list)) { + kref_get(&mcast->ah->ref); + neigh->ah = mcast->ah; + neigh->ah->valid = 1; + list_add_tail(&neigh->list, &mcast->neigh_list); + } + } + spin_unlock_irqrestore(&priv->lock, flags); + mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah, + IB_MULTICAST_QPN); + if (neigh) + ipoib_neigh_put(neigh); + return; + } + +unlock: + spin_unlock_irqrestore(&priv->lock, flags); +} + +void ipoib_mcast_dev_flush(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + LIST_HEAD(remove_list); + struct ipoib_mcast *mcast, *tmcast; + unsigned long flags; + + mutex_lock(&priv->mcast_mutex); + ipoib_dbg_mcast(priv, "flushing multicast list\n"); + + spin_lock_irqsave(&priv->lock, flags); + + list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, &remove_list); + } + + if (priv->broadcast) { + rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); + list_add_tail(&priv->broadcast->list, &remove_list); + priv->broadcast = NULL; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + ipoib_mcast_remove_list(&remove_list); + mutex_unlock(&priv->mcast_mutex); +} + +static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) +{ + /* reserved QPN, prefix, scope */ + if (memcmp(addr, broadcast, 6)) + return 0; + /* signature lower, pkey */ + if (memcmp(addr + 7, broadcast + 7, 3)) + return 0; + return 1; +} + +void ipoib_mcast_restart_task(struct work_struct *work) +{ + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, restart_task); + struct net_device *dev = priv->dev; + struct netdev_hw_addr *ha; + struct ipoib_mcast *mcast, *tmcast; + LIST_HEAD(remove_list); + struct ib_sa_mcmember_rec rec; + + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + /* + * shortcut...on shutdown flush is called next, just + * let it do all the work + */ + return; + + ipoib_dbg_mcast(priv, "restarting multicast task\n"); + + netif_addr_lock_bh(dev); + spin_lock_irq(&priv->lock); + + /* + * Unfortunately, the networking core only gives us a list of all of + * the multicast hardware addresses. 
We need to figure out which ones + * are new and which ones have been removed + */ + + /* Clear out the found flag */ + list_for_each_entry(mcast, &priv->multicast_list, list) + clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); + + /* Mark all of the entries that are found or don't exist */ + netdev_for_each_mc_addr(ha, dev) { + union ib_gid mgid; + + if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast)) + continue; + + memcpy(mgid.raw, ha->addr + 4, sizeof(mgid)); + + mcast = __ipoib_mcast_find(dev, &mgid); + if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + struct ipoib_mcast *nmcast; + + /* ignore group which is directly joined by userspace */ + if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) && + !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) { + ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n", + mgid.raw); + continue; + } + + /* Not found or send-only group, let's add a new entry */ + ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n", + mgid.raw); + + nmcast = ipoib_mcast_alloc(dev, 0); + if (!nmcast) { + ipoib_warn(priv, "unable to allocate memory for multicast structure\n"); + continue; + } + + set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags); + + nmcast->mcmember.mgid = mgid; + + if (mcast) { + /* Destroy the send only entry */ + list_move_tail(&mcast->list, &remove_list); + + rb_replace_node(&mcast->rb_node, + &nmcast->rb_node, + &priv->multicast_tree); + } else + __ipoib_mcast_add(dev, nmcast); + + list_add_tail(&nmcast->list, &priv->multicast_list); + } + + if (mcast) + set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); + } + + /* Remove all of the entries don't exist anymore */ + list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { + if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && + !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n", + mcast->mcmember.mgid.raw); + + rb_erase(&mcast->rb_node, &priv->multicast_tree); + + /* Move to the remove list */ + list_move_tail(&mcast->list, &remove_list); + } + } + + spin_unlock_irq(&priv->lock); + netif_addr_unlock_bh(dev); + + ipoib_mcast_remove_list(&remove_list); + + /* + * Double check that we are still up + */ + if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { + spin_lock_irq(&priv->lock); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + spin_unlock_irq(&priv->lock); + } +} + +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG + +struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev) +{ + struct ipoib_mcast_iter *iter; + + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return NULL; + + iter->dev = dev; + memset(iter->mgid.raw, 0, 16); + + if (ipoib_mcast_iter_next(iter)) { + kfree(iter); + return NULL; + } + + return iter; +} + +int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) +{ + struct ipoib_dev_priv *priv = ipoib_priv(iter->dev); + struct rb_node *n; + struct ipoib_mcast *mcast; + int ret = 1; + + spin_lock_irq(&priv->lock); + + n = rb_first(&priv->multicast_tree); + + while (n) { + mcast = rb_entry(n, struct ipoib_mcast, rb_node); + + if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, + sizeof (union ib_gid)) < 0) { + iter->mgid = mcast->mcmember.mgid; + iter->created = mcast->created; + iter->queuelen = skb_queue_len(&mcast->pkt_queue); + iter->complete = !!mcast->ah; + iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); + + ret = 0; + + break; + } + + n = rb_next(n); + } + + spin_unlock_irq(&priv->lock); + + return ret; +} + +void 
ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, + union ib_gid *mgid, + unsigned long *created, + unsigned int *queuelen, + unsigned int *complete, + unsigned int *send_only) +{ + *mgid = iter->mgid; + *created = iter->created; + *queuelen = iter->queuelen; + *complete = iter->complete; + *send_only = iter->send_only; +} + +#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c new file mode 100644 index 000000000..d4d553a51 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2012 Mellanox Technologies. - All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/netdevice.h> +#include <linux/if_arp.h> /* For ARPHRD_xxx */ +#include <linux/module.h> +#include <net/rtnetlink.h> +#include "ipoib.h" + +static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = { + [IFLA_IPOIB_PKEY] = { .type = NLA_U16 }, + [IFLA_IPOIB_MODE] = { .type = NLA_U16 }, + [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 }, +}; + +static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + u16 val; + + if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey)) + goto nla_put_failure; + + val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); + if (nla_put_u16(skb, IFLA_IPOIB_MODE, val)) + goto nla_put_failure; + + val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags); + if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val)) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + u16 mode, umcast; + int ret = 0; + + if (data[IFLA_IPOIB_MODE]) { + mode = nla_get_u16(data[IFLA_IPOIB_MODE]); + if (mode == IPOIB_MODE_DATAGRAM) + ret = ipoib_set_mode(dev, "datagram\n"); + else if (mode == IPOIB_MODE_CONNECTED) + ret = ipoib_set_mode(dev, "connected\n"); + else + ret = -EINVAL; + + if (ret < 0) + goto out_err; + } + + if (data[IFLA_IPOIB_UMCAST]) { + umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]); + ipoib_set_umcast(dev, umcast); + } + +out_err: + return ret; +} + +static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct net_device *pdev; + struct ipoib_dev_priv *ppriv; + u16 child_pkey; + int err; + + if (!tb[IFLA_LINK]) + return -EINVAL; + + pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!pdev || pdev->type != ARPHRD_INFINIBAND) + return -ENODEV; + + ppriv = ipoib_priv(pdev); + + if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) { + ipoib_warn(ppriv, "child creation disallowed for child devices\n"); + return -EINVAL; + } + + if (!data || !data[IFLA_IPOIB_PKEY]) { + ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n"); + child_pkey = ppriv->pkey; + } else + child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]); + + err = __ipoib_vlan_add(ppriv, ipoib_priv(dev), + child_pkey, IPOIB_RTNL_CHILD); + + if (!err && data) + err = ipoib_changelink(dev, tb, data, extack); + return err; +} + +static size_t ipoib_get_size(const struct net_device *dev) +{ + return nla_total_size(2) + /* IFLA_IPOIB_PKEY */ + nla_total_size(2) + /* IFLA_IPOIB_MODE */ + nla_total_size(2); /* IFLA_IPOIB_UMCAST */ +} + +static struct rtnl_link_ops ipoib_link_ops __read_mostly = { + .kind = "ipoib", + .maxtype = IFLA_IPOIB_MAX, + .policy = ipoib_policy, + .priv_size = sizeof(struct ipoib_dev_priv), + .setup = ipoib_setup_common, + .newlink = ipoib_new_child_link, + .changelink = ipoib_changelink, + .get_size = ipoib_get_size, + .fill_info = ipoib_fill_info, +}; + +int __init ipoib_netlink_init(void) +{ + return rtnl_link_register(&ipoib_link_ops); +} + +void __exit ipoib_netlink_fini(void) +{ + rtnl_link_unregister(&ipoib_link_ops); +} + +MODULE_ALIAS_RTNL_LINK("ipoib"); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c new file mode 100644 index 000000000..9f36ca786 --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. 
All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/slab.h> + +#include "ipoib.h" + +int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca, + union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_attr *qp_attr = NULL; + int ret; + u16 pkey_index; + + if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) { + clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); + ret = -ENXIO; + goto out; + } + set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); + + if (set_qkey) { + ret = -ENOMEM; + qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL); + if (!qp_attr) + goto out; + + /* set correct QKey for QP */ + qp_attr->qkey = qkey; + ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY); + if (ret) { + ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret); + goto out; + } + } + + /* attach QP to multicast group */ + ret = ib_attach_mcast(priv->qp, mgid, mlid); + if (ret) + ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret); + +out: + kfree(qp_attr); + return ret; +} + +int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca, + union ib_gid *mgid, u16 mlid) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + + ret = ib_detach_mcast(priv->qp, mgid, mlid); + + return ret; +} + +int ipoib_init_qp(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret; + struct ib_qp_attr qp_attr; + int attr_mask; + + if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) + return -1; + + qp_attr.qp_state = IB_QPS_INIT; + qp_attr.qkey = 0; + qp_attr.port_num = priv->port; + qp_attr.pkey_index = priv->pkey_index; + attr_mask = + IB_QP_QKEY | + IB_QP_PORT | + IB_QP_PKEY_INDEX | + IB_QP_STATE; + ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret); + goto out_fail; + } + + qp_attr.qp_state = IB_QPS_RTR; + /* Can't set this in a INIT->RTR transition */ + attr_mask &= ~IB_QP_PORT; + ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret); + goto out_fail; + } + + 
qp_attr.qp_state = IB_QPS_RTS; + qp_attr.sq_psn = 0; + attr_mask |= IB_QP_SQ_PSN; + attr_mask &= ~IB_QP_PKEY_INDEX; + ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); + if (ret) { + ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret); + goto out_fail; + } + + return 0; + +out_fail: + qp_attr.qp_state = IB_QPS_RESET; + if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) + ipoib_warn(priv, "Failed to modify QP to RESET state\n"); + + return ret; +} + +int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ib_qp_init_attr init_attr = { + .cap = { + .max_send_wr = ipoib_sendq_size, + .max_recv_wr = ipoib_recvq_size, + .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge, + MAX_SKB_FRAGS + 1), + .max_recv_sge = IPOIB_UD_RX_SG + }, + .sq_sig_type = IB_SIGNAL_ALL_WR, + .qp_type = IB_QPT_UD + }; + struct ib_cq_init_attr cq_attr = {}; + + int ret, size, req_vec; + int i; + + size = ipoib_recvq_size + 1; + ret = ipoib_cm_dev_init(dev); + if (!ret) { + size += ipoib_sendq_size; + if (ipoib_cm_has_srq(dev)) + size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */ + else + size += ipoib_recvq_size * ipoib_max_conn_qp; + } else + if (ret != -EOPNOTSUPP) + return ret; + + req_vec = (priv->port - 1) * 2; + + cq_attr.cqe = size; + cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors; + priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL, + priv, &cq_attr); + if (IS_ERR(priv->recv_cq)) { + pr_warn("%s: failed to create receive CQ\n", ca->name); + goto out_cm_dev_cleanup; + } + + cq_attr.cqe = ipoib_sendq_size; + cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors; + priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL, + priv, &cq_attr); + if (IS_ERR(priv->send_cq)) { + pr_warn("%s: failed to create send CQ\n", ca->name); + goto out_free_recv_cq; + } + + if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP)) + goto out_free_send_cq; + + init_attr.send_cq = priv->send_cq; + init_attr.recv_cq = priv->recv_cq; + + if (priv->hca_caps & IB_DEVICE_UD_TSO) + init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; + + if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK) + init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; + + if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING) + init_attr.create_flags |= IB_QP_CREATE_NETIF_QP; + + priv->qp = ib_create_qp(priv->pd, &init_attr); + if (IS_ERR(priv->qp)) { + pr_warn("%s: failed to create QP\n", ca->name); + goto out_free_send_cq; + } + + if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) + goto out_free_send_cq; + + for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) + priv->tx_sge[i].lkey = priv->pd->local_dma_lkey; + + priv->tx_wr.wr.opcode = IB_WR_SEND; + priv->tx_wr.wr.sg_list = priv->tx_sge; + priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED; + + priv->rx_sge[0].lkey = priv->pd->local_dma_lkey; + + priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); + priv->rx_wr.num_sge = 1; + + priv->rx_wr.next = NULL; + priv->rx_wr.sg_list = priv->rx_sge; + + if (init_attr.cap.max_send_sge > 1) + dev->features |= NETIF_F_SG; + + priv->max_send_sge = init_attr.cap.max_send_sge; + + return 0; + +out_free_send_cq: + ib_destroy_cq(priv->send_cq); + +out_free_recv_cq: + ib_destroy_cq(priv->recv_cq); + +out_cm_dev_cleanup: + ipoib_cm_dev_cleanup(dev); + + return -ENODEV; +} + +void ipoib_transport_dev_cleanup(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (priv->qp) { + if 
(ib_destroy_qp(priv->qp)) + ipoib_warn(priv, "ib_qp_destroy failed\n"); + + priv->qp = NULL; + } + + if (ib_destroy_cq(priv->send_cq)) + ipoib_warn(priv, "ib_cq_destroy (send) failed\n"); + + if (ib_destroy_cq(priv->recv_cq)) + ipoib_warn(priv, "ib_cq_destroy (recv) failed\n"); +} + +void ipoib_event(struct ib_event_handler *handler, + struct ib_event *record) +{ + struct ipoib_dev_priv *priv = + container_of(handler, struct ipoib_dev_priv, event_handler); + + if (record->element.port_num != priv->port) + return; + + ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event, + record->device->name, record->element.port_num); + + if (record->event == IB_EVENT_SM_CHANGE || + record->event == IB_EVENT_CLIENT_REREGISTER) { + queue_work(ipoib_workqueue, &priv->flush_light); + } else if (record->event == IB_EVENT_PORT_ERR || + record->event == IB_EVENT_PORT_ACTIVE || + record->event == IB_EVENT_LID_CHANGE) { + queue_work(ipoib_workqueue, &priv->flush_normal); + } else if (record->event == IB_EVENT_PKEY_CHANGE) { + queue_work(ipoib_workqueue, &priv->flush_heavy); + } else if (record->event == IB_EVENT_GID_CHANGE && + !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { + queue_work(ipoib_workqueue, &priv->flush_light); + } +} diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c new file mode 100644 index 000000000..341753fbd --- /dev/null +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/sched/signal.h> + +#include <linux/init.h> +#include <linux/seq_file.h> + +#include <linux/uaccess.h> + +#include "ipoib.h" + +static ssize_t show_parent(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct net_device *dev = to_net_dev(d); + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + return sprintf(buf, "%s\n", priv->parent->name); +} +static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL); + +static bool is_child_unique(struct ipoib_dev_priv *ppriv, + struct ipoib_dev_priv *priv) +{ + struct ipoib_dev_priv *tpriv; + + ASSERT_RTNL(); + + /* + * Since the legacy sysfs interface uses pkey for deletion it cannot + * support more than one interface with the same pkey, it creates + * ambiguity. The RTNL interface deletes using the netdev so it does + * not have a problem to support duplicated pkeys. + */ + if (priv->child_type != IPOIB_LEGACY_CHILD) + return true; + + /* + * First ensure this isn't a duplicate. We check the parent device and + * then all of the legacy child interfaces to make sure the Pkey + * doesn't match. + */ + if (ppriv->pkey == priv->pkey) + return false; + + list_for_each_entry(tpriv, &ppriv->child_intfs, list) { + if (tpriv->pkey == priv->pkey && + tpriv->child_type == IPOIB_LEGACY_CHILD) + return false; + } + + return true; +} + +/* + * NOTE: If this function fails then the priv->dev will remain valid, however + * priv can have been freed and must not be touched by caller in the error + * case. + * + * If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to + * free the net_device (just as rtnl_newlink does) otherwise the net_device + * will be freed when the rtnl is unlocked. + */ +int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, + u16 pkey, int type) +{ + struct net_device *ndev = priv->dev; + int result; + + ASSERT_RTNL(); + + /* + * Racing with unregister of the parent must be prevented by the + * caller. + */ + WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED); + + if (pkey == 0 || pkey == 0x8000) { + result = -EINVAL; + goto out_early; + } + + priv->parent = ppriv->dev; + priv->pkey = pkey; + priv->child_type = type; + + if (!is_child_unique(ppriv, priv)) { + result = -ENOTUNIQ; + goto out_early; + } + + /* We do not need to touch priv if register_netdevice fails */ + ndev->priv_destructor = ipoib_intf_free; + + result = register_netdevice(ndev); + if (result) { + ipoib_warn(priv, "failed to initialize; error %i", result); + + /* + * register_netdevice sometimes calls priv_destructor, + * sometimes not. Make sure it was done. 
+ */ + goto out_early; + } + + /* RTNL childs don't need proprietary sysfs entries */ + if (type == IPOIB_LEGACY_CHILD) { + if (ipoib_cm_add_mode_attr(ndev)) + goto sysfs_failed; + if (ipoib_add_pkey_attr(ndev)) + goto sysfs_failed; + if (ipoib_add_umcast_attr(ndev)) + goto sysfs_failed; + + if (device_create_file(&ndev->dev, &dev_attr_parent)) + goto sysfs_failed; + } + + return 0; + +sysfs_failed: + unregister_netdevice(priv->dev); + return -ENOMEM; + +out_early: + if (ndev->priv_destructor) + ndev->priv_destructor(ndev); + return result; +} + +int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) +{ + struct ipoib_dev_priv *ppriv, *priv; + char intf_name[IFNAMSIZ]; + struct net_device *ndev; + int result; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (pdev->reg_state != NETREG_REGISTERED) { + rtnl_unlock(); + return -EPERM; + } + + ppriv = ipoib_priv(pdev); + + snprintf(intf_name, sizeof(intf_name), "%s.%04x", + ppriv->dev->name, pkey); + + priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); + if (!priv) { + result = -ENOMEM; + goto out; + } + ndev = priv->dev; + + result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD); + + if (result && ndev->reg_state == NETREG_UNINITIALIZED) + free_netdev(ndev); + +out: + rtnl_unlock(); + + return result; +} + +struct ipoib_vlan_delete_work { + struct work_struct work; + struct net_device *dev; +}; + +/* + * sysfs callbacks of a netdevice cannot obtain the rtnl lock as + * unregister_netdev ultimately deletes the sysfs files while holding the rtnl + * lock. This deadlocks the system. + * + * A callback can use rtnl_trylock to avoid the deadlock but it cannot call + * unregister_netdev as that internally takes and releases the rtnl_lock. So + * instead we find the netdev to unregister and then do the actual unregister + * from the global work queue where we can obtain the rtnl_lock safely. 
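+ *
+ * In sketch form, this is the pattern that ipoib_vlan_delete() and
+ * ipoib_vlan_delete_task() below implement (all names are taken from the
+ * code that follows):
+ *
+ *	sysfs handler:
+ *		if (!rtnl_trylock())
+ *			return restart_syscall();
+ *		list_del_init(&priv->list);
+ *		queue_work(ipoib_workqueue, &work->work);
+ *
+ *	deferred work:
+ *		rtnl_lock();
+ *		if (dev->reg_state == NETREG_REGISTERED)
+ *			unregister_netdevice(dev);
+ *		rtnl_unlock();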
+ */ +static void ipoib_vlan_delete_task(struct work_struct *work) +{ + struct ipoib_vlan_delete_work *pwork = + container_of(work, struct ipoib_vlan_delete_work, work); + struct net_device *dev = pwork->dev; + + rtnl_lock(); + + /* Unregistering tasks can race with another task or parent removal */ + if (dev->reg_state == NETREG_REGISTERED) { + struct ipoib_dev_priv *priv = ipoib_priv(dev); + struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); + + ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name); + unregister_netdevice(dev); + } + + rtnl_unlock(); + + kfree(pwork); +} + +int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) +{ + struct ipoib_dev_priv *ppriv, *priv, *tpriv; + int rc; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (pdev->reg_state != NETREG_REGISTERED) { + rtnl_unlock(); + return -EPERM; + } + + ppriv = ipoib_priv(pdev); + + rc = -ENODEV; + list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { + if (priv->pkey == pkey && + priv->child_type == IPOIB_LEGACY_CHILD) { + struct ipoib_vlan_delete_work *work; + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) { + rc = -ENOMEM; + goto out; + } + + down_write(&ppriv->vlan_rwsem); + list_del_init(&priv->list); + up_write(&ppriv->vlan_rwsem); + work->dev = priv->dev; + INIT_WORK(&work->work, ipoib_vlan_delete_task); + queue_work(ipoib_workqueue, &work->work); + + rc = 0; + break; + } + } + +out: + rtnl_unlock(); + + return rc; +} diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig new file mode 100644 index 000000000..d00af71a2 --- /dev/null +++ b/drivers/infiniband/ulp/iser/Kconfig @@ -0,0 +1,12 @@ +config INFINIBAND_ISER + tristate "iSCSI Extensions for RDMA (iSER)" + depends on SCSI && INET && INFINIBAND_ADDR_TRANS + select SCSI_ISCSI_ATTRS + ---help--- + Support for the iSCSI Extensions for RDMA (iSER) Protocol + over InfiniBand. This allows you to access storage devices + that speak iSCSI over iSER over InfiniBand. + + The iSER protocol is defined by IETF. + See <http://www.ietf.org/rfc/rfc5046.txt> + and <http://members.infinibandta.org/kwspub/spec/Annex_iSER.PDF> diff --git a/drivers/infiniband/ulp/iser/Makefile b/drivers/infiniband/ulp/iser/Makefile new file mode 100644 index 000000000..fe6cd15f2 --- /dev/null +++ b/drivers/infiniband/ulp/iser/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_INFINIBAND_ISER) += ib_iser.o + +ib_iser-y := iser_verbs.o iser_initiator.o iser_memory.o \ + iscsi_iser.o diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c new file mode 100644 index 000000000..b4e0ae024 --- /dev/null +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -0,0 +1,1122 @@ +/* + * iSCSI Initiator over iSER Data-Path + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 Mike Christie + * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * maintained by openib-general@openib.org + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Credits: + * Christoph Hellwig + * FUJITA Tomonori + * Arne Redlich + * Zhenyu Wang + * Modified by: + * Erez Zilber + */ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/hardirq.h> +#include <linux/kfifo.h> +#include <linux/blkdev.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/in.h> +#include <linux/net.h> +#include <linux/scatterlist.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include <net/sock.h> + +#include <linux/uaccess.h> + +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi.h> +#include <scsi/scsi_transport_iscsi.h> + +#include "iscsi_iser.h" + +MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"); + +static struct scsi_host_template iscsi_iser_sht; +static struct iscsi_transport iscsi_iser_transport; +static struct scsi_transport_template *iscsi_iser_scsi_transport; +static struct workqueue_struct *release_wq; +static DEFINE_MUTEX(unbind_iser_conn_mutex); +struct iser_global ig; + +int iser_debug_level = 0; +module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); + +static unsigned int iscsi_max_lun = 512; +module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); +MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512"); + +unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; +module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); + +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); + +bool iser_pi_enable = false; +module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); +MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); + +int iser_pi_guard; +module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); 
+MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); + +/* + * iscsi_iser_recv() - Process a successful recv completion + * @conn: iscsi connection + * @hdr: iscsi header + * @rx_data: buffer containing receive data payload + * @rx_data_len: length of rx_data + * + * Notes: In case of data length errors or iscsi PDU completion failures + * this routine will signal iscsi layer of connection failure. + */ +void +iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *rx_data, int rx_data_len) +{ + int rc = 0; + int datalen; + + /* verify PDU length */ + datalen = ntoh24(hdr->dlength); + if (datalen > rx_data_len || (datalen + 4) < rx_data_len) { + iser_err("wrong datalen %d (hdr), %d (IB)\n", + datalen, rx_data_len); + rc = ISCSI_ERR_DATALEN; + goto error; + } + + if (datalen != rx_data_len) + iser_dbg("aligned datalen (%d) hdr, %d (IB)\n", + datalen, rx_data_len); + + rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len); + if (rc && rc != ISCSI_ERR_NO_SCSI_CMD) + goto error; + + return; +error: + iscsi_conn_failure(conn, rc); +} + +/** + * iscsi_iser_pdu_alloc() - allocate an iscsi-iser PDU + * @task: iscsi task + * @opcode: iscsi command opcode + * + * Netes: This routine can't fail, just assign iscsi task + * hdr and max hdr size. + */ +static int +iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + + task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header; + task->hdr_max = sizeof(iser_task->desc.iscsi_header); + + return 0; +} + +/** + * iser_initialize_task_headers() - Initialize task headers + * @task: iscsi task + * @tx_desc: iser tx descriptor + * + * Notes: + * This routine may race with iser teardown flow for scsi + * error handling TMFs. So for TMF we should acquire the + * state mutex to avoid dereferencing the IB device which + * may have already been terminated. + */ +int +iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc) +{ + struct iser_conn *iser_conn = task->conn->dd_data; + struct iser_device *device = iser_conn->ib_conn.device; + struct iscsi_iser_task *iser_task = task->dd_data; + u64 dma_addr; + const bool mgmt_task = !task->sc && !in_interrupt(); + int ret = 0; + + if (unlikely(mgmt_task)) + mutex_lock(&iser_conn->state_mutex); + + if (unlikely(iser_conn->state != ISER_CONN_UP)) { + ret = -ENODEV; + goto out; + } + + dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + if (ib_dma_mapping_error(device->ib_device, dma_addr)) { + ret = -ENOMEM; + goto out; + } + + tx_desc->wr_idx = 0; + tx_desc->mapped = true; + tx_desc->dma_addr = dma_addr; + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; + + iser_task->iser_conn = iser_conn; +out: + if (unlikely(mgmt_task)) + mutex_unlock(&iser_conn->state_mutex); + + return ret; +} + +/** + * iscsi_iser_task_init() - Initialize iscsi-iser task + * @task: iscsi task + * + * Initialize the task for the scsi command or mgmt command. + * + * Return: Returns zero on success or -ENOMEM when failing + * to init task headers (dma mapping error). 
+ */ +static int +iscsi_iser_task_init(struct iscsi_task *task) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + int ret; + + ret = iser_initialize_task_headers(task, &iser_task->desc); + if (ret) { + iser_err("Failed to init task %p, err = %d\n", + iser_task, ret); + return ret; + } + + /* mgmt task */ + if (!task->sc) + return 0; + + iser_task->command_sent = 0; + iser_task_rdma_init(iser_task); + iser_task->sc = task->sc; + + return 0; +} + +/** + * iscsi_iser_mtask_xmit() - xmit management (immediate) task + * @conn: iscsi connection + * @task: task management task + * + * Notes: + * The function can return -EAGAIN in which case caller must + * call it again later, or recover. '0' return code means successful + * xmit. + * + **/ +static int +iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +{ + int error = 0; + + iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt); + + error = iser_send_control(conn, task); + + /* since iser xmits control with zero copy, tasks can not be recycled + * right after sending them. + * The recycling scheme is based on whether a response is expected + * - if yes, the task is recycled at iscsi_complete_pdu + * - if no, the task is recycled at iser_snd_completion + */ + return error; +} + +static int +iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iscsi_r2t_info *r2t = &task->unsol_r2t; + struct iscsi_data hdr; + int error = 0; + + /* Send data-out PDUs while there's still unsolicited data to send */ + while (iscsi_task_has_unsol_data(task)) { + iscsi_prep_data_out_pdu(task, r2t, &hdr); + iser_dbg("Sending data-out: itt 0x%x, data count %d\n", + hdr.itt, r2t->data_count); + + /* the buffer description has been passed with the command */ + /* Send the command */ + error = iser_send_data_out(conn, task, &hdr); + if (error) { + r2t->datasn--; + goto iscsi_iser_task_xmit_unsol_data_exit; + } + r2t->sent += r2t->data_count; + iser_dbg("Need to send %d more as data-out PDUs\n", + r2t->data_length - r2t->sent); + } + +iscsi_iser_task_xmit_unsol_data_exit: + return error; +} + +/** + * iscsi_iser_task_xmit() - xmit iscsi-iser task + * @task: iscsi task + * + * Return: zero on success or escalates $error on failure. + */ +static int +iscsi_iser_task_xmit(struct iscsi_task *task) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_iser_task *iser_task = task->dd_data; + int error = 0; + + if (!task->sc) + return iscsi_iser_mtask_xmit(conn, task); + + if (task->sc->sc_data_direction == DMA_TO_DEVICE) { + BUG_ON(scsi_bufflen(task->sc) == 0); + + iser_dbg("cmd [itt %x total %d imm %d unsol_data %d\n", + task->itt, scsi_bufflen(task->sc), + task->imm_count, task->unsol_r2t.data_length); + } + + iser_dbg("ctask xmit [cid %d itt 0x%x]\n", + conn->id, task->itt); + + /* Send the cmd PDU */ + if (!iser_task->command_sent) { + error = iser_send_command(conn, task); + if (error) + goto iscsi_iser_task_xmit_exit; + iser_task->command_sent = 1; + } + + /* Send unsolicited data-out PDU(s) if necessary */ + if (iscsi_task_has_unsol_data(task)) + error = iscsi_iser_task_xmit_unsol_data(conn, task); + + iscsi_iser_task_xmit_exit: + return error; +} + +/** + * iscsi_iser_cleanup_task() - cleanup an iscsi-iser task + * @task: iscsi task + * + * Notes: In case the RDMA device is already NULL (might have + * been removed in DEVICE_REMOVAL CM event it will bail-out + * without doing dma unmapping. 
+ */ +static void iscsi_iser_cleanup_task(struct iscsi_task *task) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct iser_conn *iser_conn = task->conn->dd_data; + struct iser_device *device = iser_conn->ib_conn.device; + + /* DEVICE_REMOVAL event might have already released the device */ + if (!device) + return; + + if (likely(tx_desc->mapped)) { + ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + tx_desc->mapped = false; + } + + /* mgmt tasks do not need special cleanup */ + if (!task->sc) + return; + + if (iser_task->status == ISER_TASK_STATUS_STARTED) { + iser_task->status = ISER_TASK_STATUS_COMPLETED; + iser_task_rdma_finalize(iser_task); + } +} + +/** + * iscsi_iser_check_protection() - check protection information status of task. + * @task: iscsi task + * @sector: error sector if exsists (output) + * + * Return: zero if no data-integrity errors have occured + * 0x1: data-integrity error occured in the guard-block + * 0x2: data-integrity error occured in the reference tag + * 0x3: data-integrity error occured in the application tag + * + * In addition the error sector is marked. + */ +static u8 +iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + + if (iser_task->dir[ISER_DIR_IN]) + return iser_check_task_pi_status(iser_task, ISER_DIR_IN, + sector); + else + return iser_check_task_pi_status(iser_task, ISER_DIR_OUT, + sector); +} + +/** + * iscsi_iser_conn_create() - create a new iscsi-iser connection + * @cls_session: iscsi class connection + * @conn_idx: connection index within the session (for MCS) + * + * Return: iscsi_cls_conn when iscsi_conn_setup succeeds or NULL + * otherwise. + */ +static struct iscsi_cls_conn * +iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, + uint32_t conn_idx) +{ + struct iscsi_conn *conn; + struct iscsi_cls_conn *cls_conn; + + cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx); + if (!cls_conn) + return NULL; + conn = cls_conn->dd_data; + + /* + * due to issues with the login code re iser sematics + * this not set in iscsi_conn_setup - FIXME + */ + conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN; + + return cls_conn; +} + +/** + * iscsi_iser_conn_bind() - bind iscsi and iser connection structures + * @cls_session: iscsi class session + * @cls_conn: iscsi class connection + * @transport_eph: transport end-point handle + * @is_leading: indicate if this is the session leading connection (MCS) + * + * Return: zero on success, $error if iscsi_conn_bind fails and + * -EINVAL in case end-point doesn't exsits anymore or iser connection + * state is not UP (teardown already started). 
+ */ +static int +iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_eph, + int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iser_conn *iser_conn; + struct iscsi_endpoint *ep; + int error; + + error = iscsi_conn_bind(cls_session, cls_conn, is_leading); + if (error) + return error; + + /* the transport ep handle comes from user space so it must be + * verified against the global ib connections list */ + ep = iscsi_lookup_endpoint(transport_eph); + if (!ep) { + iser_err("can't bind eph %llx\n", + (unsigned long long)transport_eph); + return -EINVAL; + } + iser_conn = ep->dd_data; + + mutex_lock(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_UP) { + error = -EINVAL; + iser_err("iser_conn %p state is %d, teardown started\n", + iser_conn, iser_conn->state); + goto out; + } + + error = iser_alloc_rx_descriptors(iser_conn, conn->session); + if (error) + goto out; + + /* binds the iSER connection retrieved from the previously + * connected ep_handle to the iSCSI layer connection. exchanges + * connection pointers */ + iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn); + + conn->dd_data = iser_conn; + iser_conn->iscsi_conn = conn; + +out: + mutex_unlock(&iser_conn->state_mutex); + return error; +} + +/** + * iscsi_iser_conn_start() - start iscsi-iser connection + * @cls_conn: iscsi class connection + * + * Notes: Here iser intialize (or re-initialize) stop_completion as + * from this point iscsi must call conn_stop in session/connection + * teardown so iser transport must wait for it. + */ +static int +iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_conn *iscsi_conn; + struct iser_conn *iser_conn; + + iscsi_conn = cls_conn->dd_data; + iser_conn = iscsi_conn->dd_data; + reinit_completion(&iser_conn->stop_completion); + + return iscsi_conn_start(cls_conn); +} + +/** + * iscsi_iser_conn_stop() - stop iscsi-iser connection + * @cls_conn: iscsi class connection + * @flag: indicate if recover or terminate (passed as is) + * + * Notes: Calling iscsi_conn_stop might theoretically race with + * DEVICE_REMOVAL event and dereference a previously freed RDMA device + * handle, so we call it under iser the state lock to protect against + * this kind of race. + */ +static void +iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iser_conn *iser_conn = conn->dd_data; + + iser_info("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn); + + /* + * Userspace may have goofed up and not bound the connection or + * might have only partially setup the connection. + */ + if (iser_conn) { + mutex_lock(&iser_conn->state_mutex); + mutex_lock(&unbind_iser_conn_mutex); + iser_conn_terminate(iser_conn); + iscsi_conn_stop(cls_conn, flag); + + /* unbind */ + iser_conn->iscsi_conn = NULL; + conn->dd_data = NULL; + mutex_unlock(&unbind_iser_conn_mutex); + + complete(&iser_conn->stop_completion); + mutex_unlock(&iser_conn->state_mutex); + } else { + iscsi_conn_stop(cls_conn, flag); + } +} + +/** + * iscsi_iser_session_destroy() - destroy iscsi-iser session + * @cls_session: iscsi class session + * + * Removes and free iscsi host. 
+ */ +static void +iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + + iscsi_session_teardown(cls_session); + iscsi_host_remove(shost); + iscsi_host_free(shost); +} + +static inline unsigned int +iser_dif_prot_caps(int prot_caps) +{ + return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? + SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION : 0) | + ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? + SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) | + ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0); +} + +/** + * iscsi_iser_session_create() - create an iscsi-iser session + * @ep: iscsi end-point handle + * @cmds_max: maximum commands in this session + * @qdepth: session command queue depth + * @initial_cmdsn: initiator command sequnce number + * + * Allocates and adds a scsi host, expose DIF supprot if + * exists, and sets up an iscsi session. + */ +static struct iscsi_cls_session * +iscsi_iser_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn) +{ + struct iscsi_cls_session *cls_session; + struct Scsi_Host *shost; + struct iser_conn *iser_conn = NULL; + struct ib_conn *ib_conn; + u32 max_fr_sectors; + + shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); + if (!shost) + return NULL; + shost->transportt = iscsi_iser_scsi_transport; + shost->cmd_per_lun = qdepth; + shost->max_lun = iscsi_max_lun; + shost->max_id = 0; + shost->max_channel = 0; + shost->max_cmd_len = 16; + + /* + * older userspace tools (before 2.0-870) did not pass us + * the leading conn's ep so this will be NULL; + */ + if (ep) { + iser_conn = ep->dd_data; + shost->sg_tablesize = iser_conn->scsi_sg_tablesize; + shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds); + + mutex_lock(&iser_conn->state_mutex); + if (iser_conn->state != ISER_CONN_UP) { + iser_err("iser conn %p already started teardown\n", + iser_conn); + mutex_unlock(&iser_conn->state_mutex); + goto free_host; + } + + ib_conn = &iser_conn->ib_conn; + if (ib_conn->pi_support) { + u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap; + + shost->sg_prot_tablesize = shost->sg_tablesize; + scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP | + SHOST_DIX_GUARD_CRC); + } + + if (iscsi_host_add(shost, + ib_conn->device->ib_device->dev.parent)) { + mutex_unlock(&iser_conn->state_mutex); + goto free_host; + } + mutex_unlock(&iser_conn->state_mutex); + } else { + shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX); + if (iscsi_host_add(shost, NULL)) + goto free_host; + } + + max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9; + shost->max_sectors = min(iser_max_sectors, max_fr_sectors); + + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", + iser_conn, shost->sg_tablesize, + shost->max_sectors); + + if (shost->max_sectors < iser_max_sectors) + iser_warn("max_sectors was reduced from %u to %u\n", + iser_max_sectors, shost->max_sectors); + + cls_session = iscsi_session_setup(&iscsi_iser_transport, shost, + shost->can_queue, 0, + sizeof(struct iscsi_iser_task), + initial_cmdsn, 0); + if (!cls_session) + goto remove_host; + + return cls_session; + +remove_host: + iscsi_host_remove(shost); +free_host: + iscsi_host_free(shost); + return NULL; +} + +static int +iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) +{ + 
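+	/*
+	 * Digests and markers are incompatible with the iSER data path,
+	 * so any attempt to enable them is rejected here; parameters not
+	 * handled in this switch are passed through to libiscsi's
+	 * iscsi_set_param().
+	 */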
int value; + + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + /* TBD */ + break; + case ISCSI_PARAM_HDRDGST_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("DataDigest wasn't negotiated to None\n"); + return -EPROTO; + } + break; + case ISCSI_PARAM_DATADGST_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("DataDigest wasn't negotiated to None\n"); + return -EPROTO; + } + break; + case ISCSI_PARAM_IFMARKER_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("IFMarker wasn't negotiated to No\n"); + return -EPROTO; + } + break; + case ISCSI_PARAM_OFMARKER_EN: + sscanf(buf, "%d", &value); + if (value) { + iser_err("OFMarker wasn't negotiated to No\n"); + return -EPROTO; + } + break; + default: + return iscsi_set_param(cls_conn, param, buf, buflen); + } + + return 0; +} + +/** + * iscsi_iser_set_param() - set class connection parameter + * @cls_conn: iscsi class connection + * @stats: iscsi stats to output + * + * Output connection statistics. + */ +static void +iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; /* always 0 */ + stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */ + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->custom_length = 0; +} + +static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct iser_conn *iser_conn = ep->dd_data; + int len; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + if (!iser_conn || !iser_conn->ib_conn.cma_id) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &iser_conn->ib_conn.cma_id->route.addr.dst_addr, + param, buf); + break; + default: + return -ENOSYS; + } + + return len; +} + +/** + * iscsi_iser_ep_connect() - Initiate iSER connection establishment + * @shost: scsi_host + * @dst_addr: destination address + * @non-blocking: indicate if routine can block + * + * Allocate an iscsi endpoint, an iser_conn structure and bind them. + * After that start RDMA connection establishment via rdma_cm. We + * don't allocate iser_conn embedded in iscsi_endpoint since in teardown + * the endpoint will be destroyed at ep_disconnect while iser_conn will + * cleanup its resources asynchronuously. + * + * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error) + * if fails. + */ +static struct iscsi_endpoint * +iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) +{ + int err; + struct iser_conn *iser_conn; + struct iscsi_endpoint *ep; + + ep = iscsi_create_endpoint(0); + if (!ep) + return ERR_PTR(-ENOMEM); + + iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL); + if (!iser_conn) { + err = -ENOMEM; + goto failure; + } + + ep->dd_data = iser_conn; + iser_conn->ep = ep; + iser_conn_init(iser_conn); + + err = iser_connect(iser_conn, NULL, dst_addr, non_blocking); + if (err) + goto failure; + + return ep; +failure: + iscsi_destroy_endpoint(ep); + return ERR_PTR(err); +} + +/** + * iscsi_iser_ep_poll() - poll for iser connection establishment to complete + * @ep: iscsi endpoint (created at ep_connect) + * @timeout_ms: polling timeout allowed in ms. 
+ * + * This routine boils down to waiting for up_completion signaling + * that cma_id got CONNECTED event. + * + * Return: 1 if succeeded in connection establishment, 0 if timeout expired + * (libiscsi will retry will kick in) or -1 if interrupted by signal + * or more likely iser connection state transitioned to TEMINATING or + * DOWN during the wait period. + */ +static int +iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct iser_conn *iser_conn = ep->dd_data; + int rc; + + rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion, + msecs_to_jiffies(timeout_ms)); + /* if conn establishment failed, return error code to iscsi */ + if (rc == 0) { + mutex_lock(&iser_conn->state_mutex); + if (iser_conn->state == ISER_CONN_TERMINATING || + iser_conn->state == ISER_CONN_DOWN) + rc = -1; + mutex_unlock(&iser_conn->state_mutex); + } + + iser_info("iser conn %p rc = %d\n", iser_conn, rc); + + if (rc > 0) + return 1; /* success, this is the equivalent of EPOLLOUT */ + else if (!rc) + return 0; /* timeout */ + else + return rc; /* signal */ +} + +/** + * iscsi_iser_ep_disconnect() - Initiate connection teardown process + * @ep: iscsi endpoint handle + * + * This routine is not blocked by iser and RDMA termination process + * completion as we queue a deffered work for iser/RDMA destruction + * and cleanup or actually call it immediately in case we didn't pass + * iscsi conn bind/start stage, thus it is safe. + */ +static void +iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct iser_conn *iser_conn = ep->dd_data; + + iser_info("ep %p iser conn %p\n", ep, iser_conn); + + mutex_lock(&iser_conn->state_mutex); + iser_conn_terminate(iser_conn); + + /* + * if iser_conn and iscsi_conn are bound, we must wait for + * iscsi_conn_stop and flush errors completion before freeing + * the iser resources. Otherwise we are safe to free resources + * immediately. 
+ */ + if (iser_conn->iscsi_conn) { + INIT_WORK(&iser_conn->release_work, iser_release_work); + queue_work(release_wq, &iser_conn->release_work); + mutex_unlock(&iser_conn->state_mutex); + } else { + iser_conn->state = ISER_CONN_DOWN; + mutex_unlock(&iser_conn->state_mutex); + iser_conn_release(iser_conn); + } + + iscsi_destroy_endpoint(ep); +} + +static umode_t iser_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + case ISCSI_PARAM_DISCOVERY_SESS: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} + +static int iscsi_iser_slave_alloc(struct scsi_device *sdev) +{ + struct iscsi_session *session; + struct iser_conn *iser_conn; + struct ib_device *ib_dev; + + mutex_lock(&unbind_iser_conn_mutex); + + session = starget_to_session(scsi_target(sdev))->dd_data; + iser_conn = session->leadconn->dd_data; + if (!iser_conn) { + mutex_unlock(&unbind_iser_conn_mutex); + return -ENOTCONN; + } + ib_dev = iser_conn->ib_conn.device->ib_device; + + if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) + blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); + + mutex_unlock(&unbind_iser_conn_mutex); + + return 0; +} + +static struct scsi_host_template iscsi_iser_sht = { + .module = THIS_MODULE, + .name = "iSCSI Initiator over iSER", + .queuecommand = iscsi_queuecommand, + .change_queue_depth = scsi_change_queue_depth, + .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, + .cmd_per_lun = ISER_DEF_CMD_PER_LUN, + .eh_timed_out = iscsi_eh_cmd_timed_out, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler= iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .use_clustering = ENABLE_CLUSTERING, + .slave_alloc = iscsi_iser_slave_alloc, + .proc_name = "iscsi_iser", + .this_id = -1, + .track_queue_depth = 1, +}; + +static struct iscsi_transport iscsi_iser_transport = { + .owner = THIS_MODULE, + .name = "iser", + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO, + /* session management */ + .create_session = iscsi_iser_session_create, + .destroy_session = iscsi_iser_session_destroy, + /* connection management */ + .create_conn = iscsi_iser_conn_create, + .bind_conn = iscsi_iser_conn_bind, + .destroy_conn = iscsi_conn_teardown, + .attr_is_visible = iser_attr_is_visible, + .set_param = iscsi_iser_set_param, + 
.get_conn_param = iscsi_conn_get_param, + .get_ep_param = iscsi_iser_get_ep_param, + .get_session_param = iscsi_session_get_param, + .start_conn = iscsi_iser_conn_start, + .stop_conn = iscsi_iser_conn_stop, + /* iscsi host params */ + .get_host_param = iscsi_host_get_param, + .set_host_param = iscsi_host_set_param, + /* IO */ + .send_pdu = iscsi_conn_send_pdu, + .get_stats = iscsi_iser_conn_get_stats, + .init_task = iscsi_iser_task_init, + .xmit_task = iscsi_iser_task_xmit, + .cleanup_task = iscsi_iser_cleanup_task, + .alloc_pdu = iscsi_iser_pdu_alloc, + .check_protection = iscsi_iser_check_protection, + /* recovery */ + .session_recovery_timedout = iscsi_session_recovery_timedout, + + .ep_connect = iscsi_iser_ep_connect, + .ep_poll = iscsi_iser_ep_poll, + .ep_disconnect = iscsi_iser_ep_disconnect +}; + +static int __init iser_init(void) +{ + int err; + + iser_dbg("Starting iSER datamover...\n"); + + if (iscsi_max_lun < 1) { + iser_err("Invalid max_lun value of %u\n", iscsi_max_lun); + return -EINVAL; + } + + memset(&ig, 0, sizeof(struct iser_global)); + + ig.desc_cache = kmem_cache_create("iser_descriptors", + sizeof(struct iser_tx_desc), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (ig.desc_cache == NULL) + return -ENOMEM; + + /* device init is called only after the first addr resolution */ + mutex_init(&ig.device_list_mutex); + INIT_LIST_HEAD(&ig.device_list); + mutex_init(&ig.connlist_mutex); + INIT_LIST_HEAD(&ig.connlist); + + release_wq = alloc_workqueue("release workqueue", 0, 0); + if (!release_wq) { + iser_err("failed to allocate release workqueue\n"); + err = -ENOMEM; + goto err_alloc_wq; + } + + iscsi_iser_scsi_transport = iscsi_register_transport( + &iscsi_iser_transport); + if (!iscsi_iser_scsi_transport) { + iser_err("iscsi_register_transport failed\n"); + err = -EINVAL; + goto err_reg; + } + + return 0; + +err_reg: + destroy_workqueue(release_wq); +err_alloc_wq: + kmem_cache_destroy(ig.desc_cache); + + return err; +} + +static void __exit iser_exit(void) +{ + struct iser_conn *iser_conn, *n; + int connlist_empty; + + iser_dbg("Removing iSER datamover...\n"); + destroy_workqueue(release_wq); + + mutex_lock(&ig.connlist_mutex); + connlist_empty = list_empty(&ig.connlist); + mutex_unlock(&ig.connlist_mutex); + + if (!connlist_empty) { + iser_err("Error cleanup stage completed but we still have iser " + "connections, destroying them anyway\n"); + list_for_each_entry_safe(iser_conn, n, &ig.connlist, + conn_list) { + iser_conn_release(iser_conn); + } + } + + iscsi_unregister_transport(&iscsi_iser_transport); + kmem_cache_destroy(ig.desc_cache); +} + +module_init(iser_init); +module_exit(iser_exit); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h new file mode 100644 index 000000000..a7aeaa0c6 --- /dev/null +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -0,0 +1,716 @@ +/* + * iSER transport for the Open iSCSI Initiator & iSER transport internals + * + * Copyright (C) 2004 Dmitry Yusupov + * Copyright (C) 2004 Alex Aizman + * Copyright (C) 2005 Mike Christie + * based on code maintained by open-iscsi@googlegroups.com + * + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ISCSI_ISER_H__ +#define __ISCSI_ISER_H__ + +#include <linux/types.h> +#include <linux/net.h> +#include <linux/printk.h> +#include <scsi/libiscsi.h> +#include <scsi/scsi_transport_iscsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/iser.h> + +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/mutex.h> +#include <linux/mempool.h> +#include <linux/uio.h> + +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_fmr_pool.h> +#include <rdma/rdma_cm.h> + +#define DRV_NAME "iser" +#define PFX DRV_NAME ": " +#define DRV_VER "1.6" + +#define iser_dbg(fmt, arg...) \ + do { \ + if (unlikely(iser_debug_level > 2)) \ + printk(KERN_DEBUG PFX "%s: " fmt,\ + __func__ , ## arg); \ + } while (0) + +#define iser_warn(fmt, arg...) \ + do { \ + if (unlikely(iser_debug_level > 0)) \ + pr_warn(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define iser_info(fmt, arg...) \ + do { \ + if (unlikely(iser_debug_level > 1)) \ + pr_info(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define iser_err(fmt, arg...) 
\ + pr_err(PFX "%s: " fmt, __func__ , ## arg) + +#define SHIFT_4K 12 +#define SIZE_4K (1ULL << SHIFT_4K) +#define MASK_4K (~(SIZE_4K-1)) + +/* Default support is 512KB I/O size */ +#define ISER_DEF_MAX_SECTORS 1024 +#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K) +/* Maximum support is 8MB I/O size */ +#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K) + +#define ISER_DEF_XMIT_CMDS_DEFAULT 512 +#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT + #define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX +#else + #define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT +#endif +#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX + +/* QP settings */ +/* Maximal bounds on received asynchronous PDUs */ +#define ISER_MAX_RX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */ + +#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * + * SCSI_TMFUNC(2), LOGOUT(1) */ + +#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX) + +#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2) + +/* the max TX (send) WR supported by the iSER QP is defined by * + * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * + * to have at max for SCSI command. The tx posting & completion handling code * + * supports -EAGAIN scheme where tx is suspended till the QP has room for more * + * send WR. D=8 comes from 64K/8K */ + +#define ISER_INFLIGHT_DATAOUTS 8 + +#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \ + (1 + ISER_INFLIGHT_DATAOUTS) + \ + ISER_MAX_TX_MISC_PDUS + \ + ISER_MAX_RX_MISC_PDUS) + +/* Max registration work requests per command */ +#define ISER_MAX_REG_WR_PER_CMD 5 + +/* For Signature we don't support DATAOUTs so no need to make room for them */ +#define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \ + (1 + ISER_MAX_REG_WR_PER_CMD) + \ + ISER_MAX_TX_MISC_PDUS + \ + ISER_MAX_RX_MISC_PDUS) + +#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr \ + - ISER_MAX_TX_MISC_PDUS \ + - ISER_MAX_RX_MISC_PDUS) / \ + (1 + ISER_INFLIGHT_DATAOUTS)) + +#define ISER_SIGNAL_CMD_COUNT 32 + +/* Constant PDU lengths calculations */ +#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr)) + +#define ISER_RECV_DATA_SEG_LEN 128 +#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN) +#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN) + +/* Length of an object name string */ +#define ISER_OBJECT_NAME_SIZE 64 + +enum iser_conn_state { + ISER_CONN_INIT, /* descriptor allocd, no conn */ + ISER_CONN_PENDING, /* in the process of being established */ + ISER_CONN_UP, /* up and running */ + ISER_CONN_TERMINATING, /* in the process of being terminated */ + ISER_CONN_DOWN, /* shut down */ + ISER_CONN_STATES_NUM +}; + +enum iser_task_status { + ISER_TASK_STATUS_INIT = 0, + ISER_TASK_STATUS_STARTED, + ISER_TASK_STATUS_COMPLETED +}; + +enum iser_data_dir { + ISER_DIR_IN = 0, /* to initiator */ + ISER_DIR_OUT, /* from initiator */ + ISER_DIRS_NUM +}; + +/** + * struct iser_data_buf - iSER data buffer + * + * @sg: pointer to the sg list + * @size: num entries of this sg + * @data_len: total beffer byte len + * @dma_nents: returned by dma_map_sg + */ +struct iser_data_buf { + struct scatterlist *sg; + int size; + unsigned long data_len; + int dma_nents; +}; + +/* fwd declarations */ +struct iser_device; +struct iscsi_iser_task; +struct iscsi_endpoint; +struct iser_reg_resources; + +/** + * struct iser_mem_reg - iSER memory registration info + * + * @sge: memory region sg element + * @rkey: memory 
region remote key + * @mem_h: pointer to registration context (FMR/Fastreg) + */ +struct iser_mem_reg { + struct ib_sge sge; + u32 rkey; + void *mem_h; +}; + +enum iser_desc_type { + ISCSI_TX_CONTROL , + ISCSI_TX_SCSI_COMMAND, + ISCSI_TX_DATAOUT +}; + +/* Maximum number of work requests per task: + * Data memory region local invalidate + fast registration + * Protection memory region local invalidate + fast registration + * Signature memory region local invalidate + fast registration + * PDU send + */ +#define ISER_MAX_WRS 7 + +/** + * struct iser_tx_desc - iSER TX descriptor + * + * @iser_header: iser header + * @iscsi_header: iscsi header + * @type: command/control/dataout + * @dam_addr: header buffer dma_address + * @tx_sg: sg[0] points to iser/iscsi headers + * sg[1] optionally points to either of immediate data + * unsolicited data-out or control + * @num_sge: number sges used on this TX task + * @mapped: Is the task header mapped + * @wr_idx: Current WR index + * @wrs: Array of WRs per task + * @data_reg: Data buffer registration details + * @prot_reg: Protection buffer registration details + * @sig_attrs: Signature attributes + */ +struct iser_tx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; + enum iser_desc_type type; + u64 dma_addr; + struct ib_sge tx_sg[2]; + int num_sge; + struct ib_cqe cqe; + bool mapped; + u8 wr_idx; + union iser_wr { + struct ib_send_wr send; + struct ib_reg_wr fast_reg; + struct ib_sig_handover_wr sig; + } wrs[ISER_MAX_WRS]; + struct iser_mem_reg data_reg; + struct iser_mem_reg prot_reg; + struct ib_sig_attrs sig_attrs; +}; + +#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ + sizeof(u64) + sizeof(struct ib_sge) + \ + sizeof(struct ib_cqe))) +/** + * struct iser_rx_desc - iSER RX descriptor + * + * @iser_header: iser header + * @iscsi_header: iscsi header + * @data: received data segment + * @dma_addr: receive buffer dma address + * @rx_sg: ib_sge of receive buffer + * @pad: for sense data TODO: Modify to maximum sense length supported + */ +struct iser_rx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; + char data[ISER_RECV_DATA_SEG_LEN]; + u64 dma_addr; + struct ib_sge rx_sg; + struct ib_cqe cqe; + char pad[ISER_RX_PAD_SIZE]; +} __packed; + +/** + * struct iser_login_desc - iSER login descriptor + * + * @req: pointer to login request buffer + * @resp: pointer to login response buffer + * @req_dma: DMA address of login request buffer + * @rsp_dma: DMA address of login response buffer + * @sge: IB sge for login post recv + * @cqe: completion handler + */ +struct iser_login_desc { + void *req; + void *rsp; + u64 req_dma; + u64 rsp_dma; + struct ib_sge sge; + struct ib_cqe cqe; +} __attribute__((packed)); + +struct iser_conn; +struct ib_conn; +struct iscsi_iser_task; + +/** + * struct iser_comp - iSER completion context + * + * @cq: completion queue + * @active_qps: Number of active QPs attached + * to completion context + */ +struct iser_comp { + struct ib_cq *cq; + int active_qps; +}; + +/** + * struct iser_device - Memory registration operations + * per-device registration schemes + * + * @alloc_reg_res: Allocate registration resources + * @free_reg_res: Free registration resources + * @fast_reg_mem: Register memory buffers + * @unreg_mem: Un-register memory buffers + * @reg_desc_get: Get a registration descriptor for pool + * @reg_desc_put: Get a registration descriptor to pool + */ +struct iser_reg_ops { + int (*alloc_reg_res)(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); + 
void (*free_reg_res)(struct ib_conn *ib_conn); + int (*reg_mem)(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg); + void (*unreg_mem)(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); + struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn); + void (*reg_desc_put)(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); +}; + +/** + * struct iser_device - iSER device handle + * + * @ib_device: RDMA device + * @pd: Protection Domain for this device + * @mr: Global DMA memory region + * @event_handler: IB events handle routine + * @ig_list: entry in devices list + * @refcount: Reference counter, dominated by open iser connections + * @comps_used: Number of completion contexts used, Min between online + * cpus and device max completion vectors + * @comps: Dinamically allocated array of completion handlers + * @reg_ops: Registration ops + * @remote_inv_sup: Remote invalidate is supported on this device + */ +struct iser_device { + struct ib_device *ib_device; + struct ib_pd *pd; + struct ib_event_handler event_handler; + struct list_head ig_list; + int refcount; + int comps_used; + struct iser_comp *comps; + const struct iser_reg_ops *reg_ops; + bool remote_inv_sup; +}; + +/** + * struct iser_reg_resources - Fast registration recources + * + * @mr: memory region + * @fmr_pool: pool of fmrs + * @page_vec: fast reg page list used by fmr pool + * @mr_valid: is mr valid indicator + */ +struct iser_reg_resources { + union { + struct ib_mr *mr; + struct ib_fmr_pool *fmr_pool; + }; + struct iser_page_vec *page_vec; + u8 mr_valid:1; +}; + +/** + * struct iser_pi_context - Protection information context + * + * @rsc: protection buffer registration resources + * @sig_mr: signature enable memory region + * @sig_mr_valid: is sig_mr valid indicator + * @sig_protected: is region protected indicator + */ +struct iser_pi_context { + struct iser_reg_resources rsc; + struct ib_mr *sig_mr; + u8 sig_mr_valid:1; + u8 sig_protected:1; +}; + +/** + * struct iser_fr_desc - Fast registration descriptor + * + * @list: entry in connection fastreg pool + * @rsc: data buffer registration resources + * @pi_ctx: protection information context + */ +struct iser_fr_desc { + struct list_head list; + struct iser_reg_resources rsc; + struct iser_pi_context *pi_ctx; + struct list_head all_list; +}; + +/** + * struct iser_fr_pool: connection fast registration pool + * + * @list: list of fastreg descriptors + * @lock: protects fmr/fastreg pool + * @size: size of the pool + */ +struct iser_fr_pool { + struct list_head list; + spinlock_t lock; + int size; + struct list_head all_list; +}; + +/** + * struct ib_conn - Infiniband related objects + * + * @cma_id: rdma_cm connection maneger handle + * @qp: Connection Queue-pair + * @post_recv_buf_count: post receive counter + * @sig_count: send work request signal count + * @rx_wr: receive work request for batch posts + * @device: reference to iser device + * @comp: iser completion context + * @fr_pool: connection fast registration poool + * @pi_support: Indicate device T10-PI support + */ +struct ib_conn { + struct rdma_cm_id *cma_id; + struct ib_qp *qp; + int post_recv_buf_count; + u8 sig_count; + struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; + struct iser_device *device; + struct iser_comp *comp; + struct iser_fr_pool fr_pool; + bool pi_support; + struct ib_cqe reg_cqe; +}; + +/** + * struct iser_conn - iSER connection context + * + * @ib_conn: connection RDMA resources + * @iscsi_conn: 
link to matching iscsi connection + * @ep: transport handle + * @state: connection logical state + * @qp_max_recv_dtos: maximum number of data outs, corresponds + * to max number of post recvs + * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1) + * @min_posted_rx: (qp_max_recv_dtos >> 2) + * @max_cmds: maximum cmds allowed for this connection + * @name: connection peer portal + * @release_work: deffered work for release job + * @state_mutex: protects iser onnection state + * @stop_completion: conn_stop completion + * @ib_completion: RDMA cleanup completion + * @up_completion: connection establishment completed + * (state is ISER_CONN_UP) + * @conn_list: entry in ig conn list + * @login_desc: login descriptor + * @rx_desc_head: head of rx_descs cyclic buffer + * @rx_descs: rx buffers array (cyclic buffer) + * @num_rx_descs: number of rx descriptors + * @scsi_sg_tablesize: scsi host sg_tablesize + * @pages_per_mr: maximum pages available for registration + */ +struct iser_conn { + struct ib_conn ib_conn; + struct iscsi_conn *iscsi_conn; + struct iscsi_endpoint *ep; + enum iser_conn_state state; + unsigned qp_max_recv_dtos; + unsigned qp_max_recv_dtos_mask; + unsigned min_posted_rx; + u16 max_cmds; + char name[ISER_OBJECT_NAME_SIZE]; + struct work_struct release_work; + struct mutex state_mutex; + struct completion stop_completion; + struct completion ib_completion; + struct completion up_completion; + struct list_head conn_list; + struct iser_login_desc login_desc; + unsigned int rx_desc_head; + struct iser_rx_desc *rx_descs; + u32 num_rx_descs; + unsigned short scsi_sg_tablesize; + unsigned short pages_per_mr; + bool snd_w_inv; +}; + +/** + * struct iscsi_iser_task - iser task context + * + * @desc: TX descriptor + * @iser_conn: link to iser connection + * @status: current task status + * @sc: link to scsi command + * @command_sent: indicate if command was sent + * @dir: iser data direction + * @rdma_reg: task rdma registration desc + * @data: iser data buffer desc + * @prot: iser protection buffer desc + */ +struct iscsi_iser_task { + struct iser_tx_desc desc; + struct iser_conn *iser_conn; + enum iser_task_status status; + struct scsi_cmnd *sc; + int command_sent; + int dir[ISER_DIRS_NUM]; + struct iser_mem_reg rdma_reg[ISER_DIRS_NUM]; + struct iser_data_buf data[ISER_DIRS_NUM]; + struct iser_data_buf prot[ISER_DIRS_NUM]; +}; + +struct iser_page_vec { + u64 *pages; + int npages; + struct ib_mr fake_mr; +}; + +/** + * struct iser_global: iSER global context + * + * @device_list_mutex: protects device_list + * @device_list: iser devices global list + * @connlist_mutex: protects connlist + * @connlist: iser connections global list + * @desc_cache: kmem cache for tx dataout + */ +struct iser_global { + struct mutex device_list_mutex; + struct list_head device_list; + struct mutex connlist_mutex; + struct list_head connlist; + struct kmem_cache *desc_cache; +}; + +extern struct iser_global ig; +extern int iser_debug_level; +extern bool iser_pi_enable; +extern int iser_pi_guard; +extern unsigned int iser_max_sectors; +extern bool iser_always_reg; + +int iser_assign_reg_ops(struct iser_device *device); + +int iser_send_control(struct iscsi_conn *conn, + struct iscsi_task *task); + +int iser_send_command(struct iscsi_conn *conn, + struct iscsi_task *task); + +int iser_send_data_out(struct iscsi_conn *conn, + struct iscsi_task *task, + struct iscsi_data *hdr); + +void iscsi_iser_recv(struct iscsi_conn *conn, + struct iscsi_hdr *hdr, + char *rx_data, + int rx_data_len); + +void iser_conn_init(struct 
iser_conn *iser_conn); + +void iser_conn_release(struct iser_conn *iser_conn); + +int iser_conn_terminate(struct iser_conn *iser_conn); + +void iser_release_work(struct work_struct *work); + +void iser_err_comp(struct ib_wc *wc, const char *type); +void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc); +void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc); +void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc); +void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc); +void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc); +void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc); + +void iser_task_rdma_init(struct iscsi_iser_task *task); + +void iser_task_rdma_finalize(struct iscsi_iser_task *task); + +void iser_free_rx_descriptors(struct iser_conn *iser_conn); + +void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + enum iser_data_dir cmd_dir); + +int iser_reg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir, + bool all_imm); +void iser_unreg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir); + +int iser_connect(struct iser_conn *iser_conn, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, + int non_blocking); + +void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); +void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir); + +int iser_post_recvl(struct iser_conn *iser_conn); +int iser_post_recvm(struct iser_conn *iser_conn, int count); +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, + bool signal); + +int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir); + +void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum dma_data_direction dir); + +int iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc); +int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, + struct iscsi_session *session); +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); +void iser_free_fmr_pool(struct ib_conn *ib_conn); +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size); +void iser_free_fastreg_pool(struct ib_conn *ib_conn); +u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir, sector_t *sector); +struct iser_fr_desc * +iser_reg_desc_get_fr(struct ib_conn *ib_conn); +void +iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); +struct iser_fr_desc * +iser_reg_desc_get_fmr(struct ib_conn *ib_conn); +void +iser_reg_desc_put_fmr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc); + +static inline struct ib_send_wr * +iser_tx_next_wr(struct iser_tx_desc *tx_desc) +{ + struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send; + struct ib_send_wr *last_wr; + + if (tx_desc->wr_idx) { + last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send; + last_wr->next = cur_wr; + } + tx_desc->wr_idx++; + + return cur_wr; +} + +static inline struct iser_conn * +to_iser_conn(struct ib_conn *ib_conn) +{ + return container_of(ib_conn, struct iser_conn, ib_conn); +} + +static inline struct iser_rx_desc * +iser_rx(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_rx_desc, cqe); +} + +static inline struct iser_tx_desc * +iser_tx(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_tx_desc, cqe); +} + 
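+/*
+ * Usage sketch for iser_tx_next_wr() above (illustrative only, not taken
+ * from the driver sources): each call hands out the next free slot in
+ * tx_desc->wrs[] and links the previous WR's ->next to it, so e.g. a
+ * registration WR followed by the send WR carrying the PDU ends up as a
+ * single chain that one iser_post_send() call posts to the QP:
+ *
+ *	struct ib_send_wr *wr;
+ *
+ *	wr = iser_tx_next_wr(tx_desc);	(set up as the registration WR)
+ *	wr = iser_tx_next_wr(tx_desc);	(set up as the IB_WR_SEND)
+ *	iser_post_send(ib_conn, tx_desc, true);
+ */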
+static inline struct iser_login_desc * +iser_login(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_login_desc, cqe); +} + +#endif diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c new file mode 100644 index 000000000..96af06cfe --- /dev/null +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -0,0 +1,784 @@ +/* + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/kfifo.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> + +#include "iscsi_iser.h" + +/* Register user buffer memory and initialize passive rdma + * dto descriptor. Data size is stored in + * task->data[ISER_DIR_IN].data_len, Protection size + * os stored in task->prot[ISER_DIR_IN].data_len + */ +static int iser_prepare_read_cmd(struct iscsi_task *task) + +{ + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_mem_reg *mem_reg; + int err; + struct iser_ctrl *hdr = &iser_task->desc.iser_header; + struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN]; + + err = iser_dma_map_task_data(iser_task, + buf_in, + ISER_DIR_IN, + DMA_FROM_DEVICE); + if (err) + return err; + + if (scsi_prot_sg_count(iser_task->sc)) { + struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN]; + + err = iser_dma_map_task_data(iser_task, + pbuf_in, + ISER_DIR_IN, + DMA_FROM_DEVICE); + if (err) + return err; + } + + err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false); + if (err) { + iser_err("Failed to set up Data-IN RDMA\n"); + return err; + } + mem_reg = &iser_task->rdma_reg[ISER_DIR_IN]; + + hdr->flags |= ISER_RSV; + hdr->read_stag = cpu_to_be32(mem_reg->rkey); + hdr->read_va = cpu_to_be64(mem_reg->sge.addr); + + iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n", + task->itt, mem_reg->rkey, + (unsigned long long)mem_reg->sge.addr); + + return 0; +} + +/* Register user buffer memory and initialize passive rdma + * dto descriptor. 
Data size is stored in + * task->data[ISER_DIR_OUT].data_len, Protection size + * is stored at task->prot[ISER_DIR_OUT].data_len + */ +static int +iser_prepare_write_cmd(struct iscsi_task *task, + unsigned int imm_sz, + unsigned int unsol_sz, + unsigned int edtl) +{ + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_mem_reg *mem_reg; + int err; + struct iser_ctrl *hdr = &iser_task->desc.iser_header; + struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; + struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1]; + + err = iser_dma_map_task_data(iser_task, + buf_out, + ISER_DIR_OUT, + DMA_TO_DEVICE); + if (err) + return err; + + if (scsi_prot_sg_count(iser_task->sc)) { + struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT]; + + err = iser_dma_map_task_data(iser_task, + pbuf_out, + ISER_DIR_OUT, + DMA_TO_DEVICE); + if (err) + return err; + } + + err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT, + buf_out->data_len == imm_sz); + if (err != 0) { + iser_err("Failed to register write cmd RDMA mem\n"); + return err; + } + + mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; + + if (unsol_sz < edtl) { + hdr->flags |= ISER_WSV; + if (buf_out->data_len > imm_sz) { + hdr->write_stag = cpu_to_be32(mem_reg->rkey); + hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz); + } + + iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n", + task->itt, mem_reg->rkey, + (unsigned long long)mem_reg->sge.addr, unsol_sz); + } + + if (imm_sz > 0) { + iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", + task->itt, imm_sz); + tx_dsg->addr = mem_reg->sge.addr; + tx_dsg->length = imm_sz; + tx_dsg->lkey = mem_reg->sge.lkey; + iser_task->desc.num_sge = 2; + } + + return 0; +} + +/* creates a new tx descriptor and adds header regd buffer */ +static void iser_create_send_desc(struct iser_conn *iser_conn, + struct iser_tx_desc *tx_desc) +{ + struct iser_device *device = iser_conn->ib_conn.device; + + ib_dma_sync_single_for_cpu(device->ib_device, + tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); + + memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); + tx_desc->iser_header.flags = ISER_VER; + tx_desc->num_sge = 1; +} + +static void iser_free_login_buf(struct iser_conn *iser_conn) +{ + struct iser_device *device = iser_conn->ib_conn.device; + struct iser_login_desc *desc = &iser_conn->login_desc; + + if (!desc->req) + return; + + ib_dma_unmap_single(device->ib_device, desc->req_dma, + ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); + + ib_dma_unmap_single(device->ib_device, desc->rsp_dma, + ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); + + kfree(desc->req); + kfree(desc->rsp); + + /* make sure we never redo any unmapping */ + desc->req = NULL; + desc->rsp = NULL; +} + +static int iser_alloc_login_buf(struct iser_conn *iser_conn) +{ + struct iser_device *device = iser_conn->ib_conn.device; + struct iser_login_desc *desc = &iser_conn->login_desc; + + desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); + if (!desc->req) + return -ENOMEM; + + desc->req_dma = ib_dma_map_single(device->ib_device, desc->req, + ISCSI_DEF_MAX_RECV_SEG_LEN, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(device->ib_device, + desc->req_dma)) + goto free_req; + + desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); + if (!desc->rsp) + goto unmap_req; + + desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp, + ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + if (ib_dma_mapping_error(device->ib_device, + desc->rsp_dma)) + goto free_rsp; + + return 0; + +free_rsp: + kfree(desc->rsp); +unmap_req: + 
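+ /* error unwind: undo the request-buffer DMA mapping before freeing it */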
ib_dma_unmap_single(device->ib_device, desc->req_dma, + ISCSI_DEF_MAX_RECV_SEG_LEN, + DMA_TO_DEVICE); +free_req: + kfree(desc->req); + + return -ENOMEM; +} + +int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, + struct iscsi_session *session) +{ + int i, j; + u64 dma_addr; + struct iser_rx_desc *rx_desc; + struct ib_sge *rx_sg; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + iser_conn->qp_max_recv_dtos = session->cmds_max; + iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ + iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; + + if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max, + iser_conn->pages_per_mr)) + goto create_rdma_reg_res_failed; + + if (iser_alloc_login_buf(iser_conn)) + goto alloc_login_buf_fail; + + iser_conn->num_rx_descs = session->cmds_max; + iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs, + sizeof(struct iser_rx_desc), + GFP_KERNEL); + if (!iser_conn->rx_descs) + goto rx_desc_alloc_fail; + + rx_desc = iser_conn->rx_descs; + + for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) { + dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(device->ib_device, dma_addr)) + goto rx_desc_dma_map_failed; + + rx_desc->dma_addr = dma_addr; + rx_desc->cqe.done = iser_task_rsp; + rx_sg = &rx_desc->rx_sg; + rx_sg->addr = rx_desc->dma_addr; + rx_sg->length = ISER_RX_PAYLOAD_SIZE; + rx_sg->lkey = device->pd->local_dma_lkey; + } + + iser_conn->rx_desc_head = 0; + return 0; + +rx_desc_dma_map_failed: + rx_desc = iser_conn->rx_descs; + for (j = 0; j < i; j++, rx_desc++) + ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + kfree(iser_conn->rx_descs); + iser_conn->rx_descs = NULL; +rx_desc_alloc_fail: + iser_free_login_buf(iser_conn); +alloc_login_buf_fail: + device->reg_ops->free_reg_res(ib_conn); +create_rdma_reg_res_failed: + iser_err("failed allocating rx descriptors / data buffers\n"); + return -ENOMEM; +} + +void iser_free_rx_descriptors(struct iser_conn *iser_conn) +{ + int i; + struct iser_rx_desc *rx_desc; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + if (device->reg_ops->free_reg_res) + device->reg_ops->free_reg_res(ib_conn); + + rx_desc = iser_conn->rx_descs; + for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) + ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + kfree(iser_conn->rx_descs); + /* make sure we never redo any unmapping */ + iser_conn->rx_descs = NULL; + + iser_free_login_buf(iser_conn); +} + +static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iscsi_session *session = conn->session; + + iser_dbg("req op %x flags %x\n", req->opcode, req->flags); + /* check if this is the last login - going to full feature phase */ + if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) + return 0; + + /* + * Check that there is one posted recv buffer + * (for the last login response). 
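+ * Discovery sessions keep re-using that single login buffer; normal sessions post an initial batch of RX buffers below.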
+ */ + WARN_ON(ib_conn->post_recv_buf_count != 1); + + if (session->discovery_sess) { + iser_info("Discovery session, re-using login RX buffer\n"); + return 0; + } else + iser_info("Normal session, posting batch of RX %d buffers\n", + iser_conn->min_posted_rx); + + /* Initial post receive buffers */ + if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx)) + return -ENOMEM; + + return 0; +} + +static inline bool iser_signal_comp(u8 sig_count) +{ + return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0); +} + +/** + * iser_send_command - send command PDU + */ +int iser_send_command(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + unsigned long edtl; + int err; + struct iser_data_buf *data_buf, *prot_buf; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; + struct scsi_cmnd *sc = task->sc; + struct iser_tx_desc *tx_desc = &iser_task->desc; + u8 sig_count = ++iser_conn->ib_conn.sig_count; + + edtl = ntohl(hdr->data_length); + + /* build the tx desc regd header and add it to the tx desc dto */ + tx_desc->type = ISCSI_TX_SCSI_COMMAND; + tx_desc->cqe.done = iser_cmd_comp; + iser_create_send_desc(iser_conn, tx_desc); + + if (hdr->flags & ISCSI_FLAG_CMD_READ) { + data_buf = &iser_task->data[ISER_DIR_IN]; + prot_buf = &iser_task->prot[ISER_DIR_IN]; + } else { + data_buf = &iser_task->data[ISER_DIR_OUT]; + prot_buf = &iser_task->prot[ISER_DIR_OUT]; + } + + if (scsi_sg_count(sc)) { /* using a scatter list */ + data_buf->sg = scsi_sglist(sc); + data_buf->size = scsi_sg_count(sc); + } + data_buf->data_len = scsi_bufflen(sc); + + if (scsi_prot_sg_count(sc)) { + prot_buf->sg = scsi_prot_sglist(sc); + prot_buf->size = scsi_prot_sg_count(sc); + prot_buf->data_len = (data_buf->data_len >> + ilog2(sc->device->sector_size)) * 8; + } + + if (hdr->flags & ISCSI_FLAG_CMD_READ) { + err = iser_prepare_read_cmd(task); + if (err) + goto send_command_error; + } + if (hdr->flags & ISCSI_FLAG_CMD_WRITE) { + err = iser_prepare_write_cmd(task, + task->imm_count, + task->imm_count + + task->unsol_r2t.data_length, + edtl); + if (err) + goto send_command_error; + } + + iser_task->status = ISER_TASK_STATUS_STARTED; + + err = iser_post_send(&iser_conn->ib_conn, tx_desc, + iser_signal_comp(sig_count)); + if (!err) + return 0; + +send_command_error: + iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); + return err; +} + +/** + * iser_send_data_out - send data out PDU + */ +int iser_send_data_out(struct iscsi_conn *conn, + struct iscsi_task *task, + struct iscsi_data *hdr) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_tx_desc *tx_desc; + struct iser_mem_reg *mem_reg; + unsigned long buf_offset; + unsigned long data_seg_len; + uint32_t itt; + int err; + struct ib_sge *tx_dsg; + + itt = (__force uint32_t)hdr->itt; + data_seg_len = ntoh24(hdr->dlength); + buf_offset = ntohl(hdr->offset); + + iser_dbg("%s itt %d dseg_len %d offset %d\n", + __func__,(int)itt,(int)data_seg_len,(int)buf_offset); + + tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC); + if (!tx_desc) + return -ENOMEM; + + tx_desc->type = ISCSI_TX_DATAOUT; + tx_desc->cqe.done = iser_dataout_comp; + tx_desc->iser_header.flags = ISER_VER; + memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); + + /* build the tx desc */ + err = iser_initialize_task_headers(task, tx_desc); + if (err) + goto send_data_out_error; + + mem_reg = 
&iser_task->rdma_reg[ISER_DIR_OUT]; + tx_dsg = &tx_desc->tx_sg[1]; + tx_dsg->addr = mem_reg->sge.addr + buf_offset; + tx_dsg->length = data_seg_len; + tx_dsg->lkey = mem_reg->sge.lkey; + tx_desc->num_sge = 2; + + if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { + iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n", + buf_offset, data_seg_len, + iser_task->data[ISER_DIR_OUT].data_len, itt); + err = -EINVAL; + goto send_data_out_error; + } + iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n", + itt, buf_offset, data_seg_len); + + + err = iser_post_send(&iser_conn->ib_conn, tx_desc, true); + if (!err) + return 0; + +send_data_out_error: + kmem_cache_free(ig.desc_cache, tx_desc); + iser_err("conn %p failed err %d\n", conn, err); + return err; +} + +int iser_send_control(struct iscsi_conn *conn, + struct iscsi_task *task) +{ + struct iser_conn *iser_conn = conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_tx_desc *mdesc = &iser_task->desc; + unsigned long data_seg_len; + int err = 0; + struct iser_device *device; + + /* build the tx desc regd header and add it to the tx desc dto */ + mdesc->type = ISCSI_TX_CONTROL; + mdesc->cqe.done = iser_ctrl_comp; + iser_create_send_desc(iser_conn, mdesc); + + device = iser_conn->ib_conn.device; + + data_seg_len = ntoh24(task->hdr->dlength); + + if (data_seg_len > 0) { + struct iser_login_desc *desc = &iser_conn->login_desc; + struct ib_sge *tx_dsg = &mdesc->tx_sg[1]; + + if (task != conn->login_task) { + iser_err("data present on non login task!!!\n"); + goto send_control_error; + } + + ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma, + task->data_count, DMA_TO_DEVICE); + + memcpy(desc->req, task->data, task->data_count); + + ib_dma_sync_single_for_device(device->ib_device, desc->req_dma, + task->data_count, DMA_TO_DEVICE); + + tx_dsg->addr = desc->req_dma; + tx_dsg->length = task->data_count; + tx_dsg->lkey = device->pd->local_dma_lkey; + mdesc->num_sge = 2; + } + + if (task == conn->login_task) { + iser_dbg("op %x dsl %lx, posting login rx buffer\n", + task->hdr->opcode, data_seg_len); + err = iser_post_recvl(iser_conn); + if (err) + goto send_control_error; + err = iser_post_rx_bufs(conn, task->hdr); + if (err) + goto send_control_error; + } + + err = iser_post_send(&iser_conn->ib_conn, mdesc, true); + if (!err) + return 0; + +send_control_error: + iser_err("conn %p failed err %d\n",conn, err); + return err; +} + +void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct ib_conn *ib_conn = wc->qp->qp_context; + struct iser_conn *iser_conn = to_iser_conn(ib_conn); + struct iser_login_desc *desc = iser_login(wc->wr_cqe); + struct iscsi_hdr *hdr; + char *data; + int length; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + iser_err_comp(wc, "login_rsp"); + return; + } + + ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, + desc->rsp_dma, ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + + hdr = desc->rsp + sizeof(struct iser_ctrl); + data = desc->rsp + ISER_HEADERS_LEN; + length = wc->byte_len - ISER_HEADERS_LEN; + + iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, + hdr->itt, length); + + iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length); + + ib_dma_sync_single_for_device(ib_conn->device->ib_device, + desc->rsp_dma, ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + + ib_conn->post_recv_buf_count--; +} + +static inline int +iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) +{ + if (likely(rkey == desc->rsc.mr->rkey)) { + desc->rsc.mr_valid = 
0; + } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) { + desc->pi_ctx->sig_mr_valid = 0; + } else { + iser_err("Bogus remote invalidation for rkey %#x\n", rkey); + return -EINVAL; + } + + return 0; +} + +static int +iser_check_remote_inv(struct iser_conn *iser_conn, + struct ib_wc *wc, + struct iscsi_hdr *hdr) +{ + if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { + struct iscsi_task *task; + u32 rkey = wc->ex.invalidate_rkey; + + iser_dbg("conn %p: remote invalidation for rkey %#x\n", + iser_conn, rkey); + + if (unlikely(!iser_conn->snd_w_inv)) { + iser_err("conn %p: unexpected remote invalidation, terminating connection\n", + iser_conn); + return -EPROTO; + } + + task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt); + if (likely(task)) { + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_fr_desc *desc; + + if (iser_task->dir[ISER_DIR_IN]) { + desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h; + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; + } + + if (iser_task->dir[ISER_DIR_OUT]) { + desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h; + if (unlikely(iser_inv_desc(desc, rkey))) + return -EINVAL; + } + } else { + iser_err("failed to get task for itt=%d\n", hdr->itt); + return -EINVAL; + } + } + + return 0; +} + + +void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct ib_conn *ib_conn = wc->qp->qp_context; + struct iser_conn *iser_conn = to_iser_conn(ib_conn); + struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); + struct iscsi_hdr *hdr; + int length; + int outstanding, count, err; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + iser_err_comp(wc, "task_rsp"); + return; + } + + ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, + desc->dma_addr, ISER_RX_PAYLOAD_SIZE, + DMA_FROM_DEVICE); + + hdr = &desc->iscsi_header; + length = wc->byte_len - ISER_HEADERS_LEN; + + iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, + hdr->itt, length); + + if (iser_check_remote_inv(iser_conn, wc, hdr)) { + iscsi_conn_failure(iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + return; + } + + iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length); + + ib_dma_sync_single_for_device(ib_conn->device->ib_device, + desc->dma_addr, ISER_RX_PAYLOAD_SIZE, + DMA_FROM_DEVICE); + + /* decrementing conn->post_recv_buf_count only --after-- freeing the * + * task eliminates the need to worry on tasks which are completed in * + * parallel to the execution of iser_conn_term. 
So the code that waits * + * for the posted rx bufs refcount to become zero handles everything */ + ib_conn->post_recv_buf_count--; + + outstanding = ib_conn->post_recv_buf_count; + if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) { + count = min(iser_conn->qp_max_recv_dtos - outstanding, + iser_conn->min_posted_rx); + err = iser_post_recvm(iser_conn, count); + if (err) + iser_err("posting %d rx bufs err %d\n", count, err); + } +} + +void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + iser_err_comp(wc, "command"); +} + +void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); + struct iscsi_task *task; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + iser_err_comp(wc, "control"); + return; + } + + /* this arithmetic is legal by libiscsi dd_data allocation */ + task = (void *)desc - sizeof(struct iscsi_task); + if (task->hdr->itt == RESERVED_ITT) + iscsi_put_task(task); +} + +void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); + struct ib_conn *ib_conn = wc->qp->qp_context; + struct iser_device *device = ib_conn->device; + + if (unlikely(wc->status != IB_WC_SUCCESS)) + iser_err_comp(wc, "dataout"); + + ib_dma_unmap_single(device->ib_device, desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + kmem_cache_free(ig.desc_cache, desc); +} + +void iser_task_rdma_init(struct iscsi_iser_task *iser_task) + +{ + iser_task->status = ISER_TASK_STATUS_INIT; + + iser_task->dir[ISER_DIR_IN] = 0; + iser_task->dir[ISER_DIR_OUT] = 0; + + iser_task->data[ISER_DIR_IN].data_len = 0; + iser_task->data[ISER_DIR_OUT].data_len = 0; + + iser_task->prot[ISER_DIR_IN].data_len = 0; + iser_task->prot[ISER_DIR_OUT].data_len = 0; + + memset(&iser_task->rdma_reg[ISER_DIR_IN], 0, + sizeof(struct iser_mem_reg)); + memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0, + sizeof(struct iser_mem_reg)); +} + +void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) +{ + int prot_count = scsi_prot_sg_count(iser_task->sc); + + if (iser_task->dir[ISER_DIR_IN]) { + iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); + iser_dma_unmap_task_data(iser_task, + &iser_task->data[ISER_DIR_IN], + DMA_FROM_DEVICE); + if (prot_count) + iser_dma_unmap_task_data(iser_task, + &iser_task->prot[ISER_DIR_IN], + DMA_FROM_DEVICE); + } + + if (iser_task->dir[ISER_DIR_OUT]) { + iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); + iser_dma_unmap_task_data(iser_task, + &iser_task->data[ISER_DIR_OUT], + DMA_TO_DEVICE); + if (prot_count) + iser_dma_unmap_task_data(iser_task, + &iser_task->prot[ISER_DIR_OUT], + DMA_TO_DEVICE); + } +} diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c new file mode 100644 index 000000000..379bc0dfc --- /dev/null +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/highmem.h> +#include <linux/scatterlist.h> + +#include "iscsi_iser.h" +static +int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *mem_reg); +static +int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *mem_reg); + +static const struct iser_reg_ops fastreg_ops = { + .alloc_reg_res = iser_alloc_fastreg_pool, + .free_reg_res = iser_free_fastreg_pool, + .reg_mem = iser_fast_reg_mr, + .unreg_mem = iser_unreg_mem_fastreg, + .reg_desc_get = iser_reg_desc_get_fr, + .reg_desc_put = iser_reg_desc_put_fr, +}; + +static const struct iser_reg_ops fmr_ops = { + .alloc_reg_res = iser_alloc_fmr_pool, + .free_reg_res = iser_free_fmr_pool, + .reg_mem = iser_fast_reg_fmr, + .unreg_mem = iser_unreg_mem_fmr, + .reg_desc_get = iser_reg_desc_get_fmr, + .reg_desc_put = iser_reg_desc_put_fmr, +}; + +void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc) +{ + iser_err_comp(wc, "memreg"); +} + +int iser_assign_reg_ops(struct iser_device *device) +{ + struct ib_device *ib_dev = device->ib_device; + + /* Assign function handles - based on FMR support */ + if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr && + ib_dev->map_phys_fmr && ib_dev->unmap_fmr) { + iser_info("FMR supported, using FMR for registration\n"); + device->reg_ops = &fmr_ops; + } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + iser_info("FastReg supported, using FastReg for registration\n"); + device->reg_ops = &fastreg_ops; + device->remote_inv_sup = iser_always_reg; + } else { + iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); + return -1; + } + + return 0; +} + +struct iser_fr_desc * +iser_reg_desc_get_fr(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + unsigned long flags; + + spin_lock_irqsave(&fr_pool->lock, flags); + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + spin_unlock_irqrestore(&fr_pool->lock, flags); + + return desc; +} + +void 
+iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + unsigned long flags; + + spin_lock_irqsave(&fr_pool->lock, flags); + list_add(&desc->list, &fr_pool->list); + spin_unlock_irqrestore(&fr_pool->lock, flags); +} + +struct iser_fr_desc * +iser_reg_desc_get_fmr(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + + return list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); +} + +void +iser_reg_desc_put_fmr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) +{ +} + +static void iser_data_buf_dump(struct iser_data_buf *data, + struct ib_device *ibdev) +{ + struct scatterlist *sg; + int i; + + for_each_sg(data->sg, sg, data->dma_nents, i) + iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p " + "off:0x%x sz:0x%x dma_len:0x%x\n", + i, (unsigned long)ib_sg_dma_address(ibdev, sg), + sg_page(sg), sg->offset, + sg->length, ib_sg_dma_len(ibdev, sg)); +} + +static void iser_dump_page_vec(struct iser_page_vec *page_vec) +{ + int i; + + iser_err("page vec npages %d data length %lld\n", + page_vec->npages, page_vec->fake_mr.length); + for (i = 0; i < page_vec->npages; i++) + iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); +} + +int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir) +{ + struct ib_device *dev; + + iser_task->dir[iser_dir] = 1; + dev = iser_task->iser_conn->ib_conn.device->ib_device; + + data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir); + if (data->dma_nents == 0) { + iser_err("dma_map_sg failed!!!\n"); + return -EINVAL; + } + return 0; +} + +void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum dma_data_direction dir) +{ + struct ib_device *dev; + + dev = iser_task->iser_conn->ib_conn.device->ib_device; + ib_dma_unmap_sg(dev, data->sg, data->size, dir); +} + +static int +iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, + struct iser_mem_reg *reg) +{ + struct scatterlist *sg = mem->sg; + + reg->sge.lkey = device->pd->local_dma_lkey; + /* + * FIXME: rework the registration code path to differentiate + * rkey/lkey use cases + */ + + if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) + reg->rkey = device->pd->unsafe_global_rkey; + else + reg->rkey = 0; + reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]); + reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]); + + iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx," + " length=0x%x\n", reg->sge.lkey, reg->rkey, + reg->sge.addr, reg->sge.length); + + return 0; +} + +static int iser_set_page(struct ib_mr *mr, u64 addr) +{ + struct iser_page_vec *page_vec = + container_of(mr, struct iser_page_vec, fake_mr); + + page_vec->pages[page_vec->npages++] = addr; + + return 0; +} + +static +int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg) +{ + struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + struct iser_page_vec *page_vec = rsc->page_vec; + struct ib_fmr_pool *fmr_pool = rsc->fmr_pool; + struct ib_pool_fmr *fmr; + int ret, plen; + + page_vec->npages = 0; + page_vec->fake_mr.page_size = SIZE_4K; + plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg, + mem->dma_nents, NULL, iser_set_page); + if (unlikely(plen < mem->dma_nents)) { + iser_err("page vec too short to hold this 
SG\n"); + iser_data_buf_dump(mem, device->ib_device); + iser_dump_page_vec(page_vec); + return -EINVAL; + } + + fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages, + page_vec->npages, page_vec->pages[0]); + if (IS_ERR(fmr)) { + ret = PTR_ERR(fmr); + iser_err("ib_fmr_pool_map_phys failed: %d\n", ret); + return ret; + } + + reg->sge.lkey = fmr->fmr->lkey; + reg->rkey = fmr->fmr->rkey; + reg->sge.addr = page_vec->fake_mr.iova; + reg->sge.length = page_vec->fake_mr.length; + reg->mem_h = fmr; + + iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx," + " length=0x%x\n", reg->sge.lkey, reg->rkey, + reg->sge.addr, reg->sge.length); + + return 0; +} + +/** + * Unregister (previosuly registered using FMR) memory. + * If memory is non-FMR does nothing. + */ +void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir) +{ + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + int ret; + + if (!reg->mem_h) + return; + + iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h); + + ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h); + if (ret) + iser_err("ib_fmr_pool_unmap failed %d\n", ret); + + reg->mem_h = NULL; +} + +void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir) +{ + struct iser_device *device = iser_task->iser_conn->ib_conn.device; + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + + if (!reg->mem_h) + return; + + device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, + reg->mem_h); + reg->mem_h = NULL; +} + +static void +iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs, + struct ib_sig_domain *domain) +{ + domain->sig_type = IB_SIG_TYPE_T10_DIF; + domain->sig.dif.pi_interval = scsi_prot_interval(sc); + domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request); + /* + * At the moment we hard code those, but in the future + * we will take them from sc. + */ + domain->sig.dif.apptag_check_mask = 0xffff; + domain->sig.dif.app_escape = true; + domain->sig.dif.ref_escape = true; + if (sc->prot_flags & SCSI_PROT_REF_INCREMENT) + domain->sig.dif.ref_remap = true; +}; + +static int +iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) +{ + switch (scsi_get_prot_op(sc)) { + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_STRIP: + sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire); + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem); + sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ? + IB_T10DIF_CSUM : IB_T10DIF_CRC; + break; + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire); + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem); + sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ? 
+ IB_T10DIF_CSUM : IB_T10DIF_CRC; + break; + default: + iser_err("Unsupported PI operation %d\n", + scsi_get_prot_op(sc)); + return -EINVAL; + } + + return 0; +} + +static inline void +iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) +{ + *mask = 0; + if (sc->prot_flags & SCSI_PROT_REF_CHECK) + *mask |= IB_SIG_CHECK_REFTAG; + if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) + *mask |= IB_SIG_CHECK_GUARD; +} + +static inline void +iser_inv_rkey(struct ib_send_wr *inv_wr, + struct ib_mr *mr, + struct ib_cqe *cqe) +{ + inv_wr->opcode = IB_WR_LOCAL_INV; + inv_wr->wr_cqe = cqe; + inv_wr->ex.invalidate_rkey = mr->rkey; + inv_wr->send_flags = 0; + inv_wr->num_sge = 0; +} + +static int +iser_reg_sig_mr(struct iscsi_iser_task *iser_task, + struct iser_pi_context *pi_ctx, + struct iser_mem_reg *data_reg, + struct iser_mem_reg *prot_reg, + struct iser_mem_reg *sig_reg) +{ + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs; + struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; + struct ib_sig_handover_wr *wr; + struct ib_mr *mr = pi_ctx->sig_mr; + int ret; + + memset(sig_attrs, 0, sizeof(*sig_attrs)); + ret = iser_set_sig_attrs(iser_task->sc, sig_attrs); + if (ret) + goto err; + + iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); + + if (pi_ctx->sig_mr_valid) + iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); + + ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); + + wr = container_of(iser_tx_next_wr(tx_desc), struct ib_sig_handover_wr, + wr); + wr->wr.opcode = IB_WR_REG_SIG_MR; + wr->wr.wr_cqe = cqe; + wr->wr.sg_list = &data_reg->sge; + wr->wr.num_sge = 1; + wr->wr.send_flags = 0; + wr->sig_attrs = sig_attrs; + wr->sig_mr = mr; + if (scsi_prot_sg_count(iser_task->sc)) + wr->prot = &prot_reg->sge; + else + wr->prot = NULL; + wr->access_flags = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + pi_ctx->sig_mr_valid = 1; + + sig_reg->sge.lkey = mr->lkey; + sig_reg->rkey = mr->rkey; + sig_reg->sge.addr = 0; + sig_reg->sge.length = scsi_transfer_length(iser_task->sc); + + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n", + sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, + sig_reg->sge.length); +err: + return ret; +} + +static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *reg) +{ + struct iser_tx_desc *tx_desc = &iser_task->desc; + struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; + struct ib_mr *mr = rsc->mr; + struct ib_reg_wr *wr; + int n; + + if (rsc->mr_valid) + iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); + + ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); + + n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K); + if (unlikely(n != mem->dma_nents)) { + iser_err("failed to map sg (%d/%d)\n", + n, mem->dma_nents); + return n < 0 ? 
n : -EINVAL; + } + + wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr); + wr->wr.opcode = IB_WR_REG_MR; + wr->wr.wr_cqe = cqe; + wr->wr.send_flags = 0; + wr->wr.num_sge = 0; + wr->mr = mr; + wr->key = mr->rkey; + wr->access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + rsc->mr_valid = 1; + + reg->sge.lkey = mr->lkey; + reg->rkey = mr->rkey; + reg->sge.addr = mr->iova; + reg->sge.length = mr->length; + + iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n", + reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length); + + return 0; +} + +static int +iser_reg_prot_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, + bool use_dma_key, + struct iser_mem_reg *reg) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + if (use_dma_key) + return iser_reg_dma(device, mem, reg); + + return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); +} + +static int +iser_reg_data_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, + bool use_dma_key, + struct iser_mem_reg *reg) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + if (use_dma_key) + return iser_reg_dma(device, mem, reg); + + return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); +} + +int iser_reg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir, + bool all_imm) +{ + struct ib_conn *ib_conn = &task->iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + struct iser_data_buf *mem = &task->data[dir]; + struct iser_mem_reg *reg = &task->rdma_reg[dir]; + struct iser_mem_reg *data_reg; + struct iser_fr_desc *desc = NULL; + bool use_dma_key; + int err; + + use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL; + + if (!use_dma_key) { + desc = device->reg_ops->reg_desc_get(ib_conn); + reg->mem_h = desc; + } + + if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) + data_reg = reg; + else + data_reg = &task->desc.data_reg; + + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); + if (unlikely(err)) + goto err_reg; + + if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + struct iser_mem_reg *prot_reg = &task->desc.prot_reg; + + if (scsi_prot_sg_count(task->sc)) { + mem = &task->prot[dir]; + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); + if (unlikely(err)) + goto err_reg; + } + + err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg, + prot_reg, reg); + if (unlikely(err)) + goto err_reg; + + desc->pi_ctx->sig_protected = 1; + } + + return 0; + +err_reg: + if (desc) + device->reg_ops->reg_desc_put(ib_conn, desc); + + return err; +} + +void iser_unreg_rdma_mem(struct iscsi_iser_task *task, + enum iser_data_dir dir) +{ + struct iser_device *device = task->iser_conn->ib_conn.device; + + device->reg_ops->unreg_mem(task, dir); +} diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c new file mode 100644 index 000000000..bee8c0b1d --- /dev/null +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -0,0 +1,1174 @@ +/* + * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/delay.h> + +#include "iscsi_iser.h" + +#define ISCSI_ISER_MAX_CONN 8 +#define ISER_MAX_RX_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_TX_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_CQ_LEN (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \ + ISCSI_ISER_MAX_CONN) + +static void iser_qp_event_callback(struct ib_event *cause, void *context) +{ + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); +} + +static void iser_event_handler(struct ib_event_handler *handler, + struct ib_event *event) +{ + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); +} + +/** + * iser_create_device_ib_res - creates Protection Domain (PD), Completion + * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with + * the adapator. + * + * returns 0 on success, -1 on failure + */ +static int iser_create_device_ib_res(struct iser_device *device) +{ + struct ib_device *ib_dev = device->ib_device; + int ret, i, max_cqe; + + ret = iser_assign_reg_ops(device); + if (ret) + return ret; + + device->comps_used = min_t(int, num_online_cpus(), + ib_dev->num_comp_vectors); + + device->comps = kcalloc(device->comps_used, sizeof(*device->comps), + GFP_KERNEL); + if (!device->comps) + goto comps_err; + + max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe); + + iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n", + device->comps_used, ib_dev->name, + ib_dev->num_comp_vectors, max_cqe); + + device->pd = ib_alloc_pd(ib_dev, + iser_always_reg ? 
0 : IB_PD_UNSAFE_GLOBAL_RKEY); + if (IS_ERR(device->pd)) + goto pd_err; + + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i, + IB_POLL_SOFTIRQ); + if (IS_ERR(comp->cq)) { + comp->cq = NULL; + goto cq_err; + } + } + + INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev, + iser_event_handler); + ib_register_event_handler(&device->event_handler); + return 0; + +cq_err: + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + if (comp->cq) + ib_free_cq(comp->cq); + } + ib_dealloc_pd(device->pd); +pd_err: + kfree(device->comps); +comps_err: + iser_err("failed to allocate an IB resource\n"); + return -1; +} + +/** + * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR, + * CQ and PD created with the device associated with the adapator. + */ +static void iser_free_device_ib_res(struct iser_device *device) +{ + int i; + + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + ib_free_cq(comp->cq); + comp->cq = NULL; + } + + ib_unregister_event_handler(&device->event_handler); + ib_dealloc_pd(device->pd); + + kfree(device->comps); + device->comps = NULL; + device->pd = NULL; +} + +/** + * iser_alloc_fmr_pool - Creates FMR pool and page_vector + * + * returns 0 on success, or errno code on failure + */ +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) +{ + struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_page_vec *page_vec; + struct iser_fr_desc *desc; + struct ib_fmr_pool *fmr_pool; + struct ib_fmr_pool_param params; + int ret; + + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size), + GFP_KERNEL); + if (!page_vec) { + ret = -ENOMEM; + goto err_frpl; + } + + page_vec->pages = (u64 *)(page_vec + 1); + + params.page_shift = SHIFT_4K; + params.max_pages_per_fmr = size; + /* make the pool size twice the max number of SCSI commands * + * the ML is expected to queue, watermark for unmap at 50% */ + params.pool_size = cmds_max * 2; + params.dirty_watermark = cmds_max; + params.cache = 0; + params.flush_function = NULL; + params.access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); + + fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); + if (IS_ERR(fmr_pool)) { + ret = PTR_ERR(fmr_pool); + iser_err("FMR allocation failed, err %d\n", ret); + goto err_fmr; + } + + desc->rsc.page_vec = page_vec; + desc->rsc.fmr_pool = fmr_pool; + list_add(&desc->list, &fr_pool->list); + + return 0; + +err_fmr: + kfree(page_vec); +err_frpl: + kfree(desc); + + return ret; +} + +/** + * iser_free_fmr_pool - releases the FMR pool and page vec + */ +void iser_free_fmr_pool(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + + iser_info("freeing conn %p fmr pool %p\n", + ib_conn, desc->rsc.fmr_pool); + + ib_destroy_fmr_pool(desc->rsc.fmr_pool); + kfree(desc->rsc.page_vec); + kfree(desc); +} + +static int +iser_alloc_reg_res(struct iser_device *device, + struct ib_pd *pd, + struct iser_reg_resources *res, + unsigned int size) +{ + struct ib_device *ib_dev = device->ib_device; + enum ib_mr_type mr_type; + int ret; + + 
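+ /* prefer SG_GAPS-capable MRs when the device can register arbitrary SG layouts; otherwise fall back to a plain fast-reg MR of up to 'size' pages */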
if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) + mr_type = IB_MR_TYPE_SG_GAPS; + else + mr_type = IB_MR_TYPE_MEM_REG; + + res->mr = ib_alloc_mr(pd, mr_type, size); + if (IS_ERR(res->mr)) { + ret = PTR_ERR(res->mr); + iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); + return ret; + } + res->mr_valid = 0; + + return 0; +} + +static void +iser_free_reg_res(struct iser_reg_resources *rsc) +{ + ib_dereg_mr(rsc->mr); +} + +static int +iser_alloc_pi_ctx(struct iser_device *device, + struct ib_pd *pd, + struct iser_fr_desc *desc, + unsigned int size) +{ + struct iser_pi_context *pi_ctx = NULL; + int ret; + + desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); + if (!desc->pi_ctx) + return -ENOMEM; + + pi_ctx = desc->pi_ctx; + + ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size); + if (ret) { + iser_err("failed to allocate reg_resources\n"); + goto alloc_reg_res_err; + } + + pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); + if (IS_ERR(pi_ctx->sig_mr)) { + ret = PTR_ERR(pi_ctx->sig_mr); + goto sig_mr_failure; + } + pi_ctx->sig_mr_valid = 0; + desc->pi_ctx->sig_protected = 0; + + return 0; + +sig_mr_failure: + iser_free_reg_res(&pi_ctx->rsc); +alloc_reg_res_err: + kfree(desc->pi_ctx); + + return ret; +} + +static void +iser_free_pi_ctx(struct iser_pi_context *pi_ctx) +{ + iser_free_reg_res(&pi_ctx->rsc); + ib_dereg_mr(pi_ctx->sig_mr); + kfree(pi_ctx); +} + +static struct iser_fr_desc * +iser_create_fastreg_desc(struct iser_device *device, + struct ib_pd *pd, + bool pi_enable, + unsigned int size) +{ + struct iser_fr_desc *desc; + int ret; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + ret = iser_alloc_reg_res(device, pd, &desc->rsc, size); + if (ret) + goto reg_res_alloc_failure; + + if (pi_enable) { + ret = iser_alloc_pi_ctx(device, pd, desc, size); + if (ret) + goto pi_ctx_alloc_failure; + } + + return desc; + +pi_ctx_alloc_failure: + iser_free_reg_res(&desc->rsc); +reg_res_alloc_failure: + kfree(desc); + + return ERR_PTR(ret); +} + +/** + * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors + * for fast registration work requests. 
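+ * @ib_conn: connection RDMA resources
+ * @cmds_max: number of descriptors to allocate for the pool
+ * @size: max number of pages a single registration may map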
+ * returns 0 on success, or errno code on failure + */ +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) +{ + struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + int i, ret; + + INIT_LIST_HEAD(&fr_pool->list); + INIT_LIST_HEAD(&fr_pool->all_list); + spin_lock_init(&fr_pool->lock); + fr_pool->size = 0; + for (i = 0; i < cmds_max; i++) { + desc = iser_create_fastreg_desc(device, device->pd, + ib_conn->pi_support, size); + if (IS_ERR(desc)) { + ret = PTR_ERR(desc); + goto err; + } + + list_add_tail(&desc->list, &fr_pool->list); + list_add_tail(&desc->all_list, &fr_pool->all_list); + fr_pool->size++; + } + + return 0; + +err: + iser_free_fastreg_pool(ib_conn); + return ret; +} + +/** + * iser_free_fastreg_pool - releases the pool of fast_reg descriptors + */ +void iser_free_fastreg_pool(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc, *tmp; + int i = 0; + + if (list_empty(&fr_pool->all_list)) + return; + + iser_info("freeing conn %p fr pool\n", ib_conn); + + list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { + list_del(&desc->all_list); + iser_free_reg_res(&desc->rsc); + if (desc->pi_ctx) + iser_free_pi_ctx(desc->pi_ctx); + kfree(desc); + ++i; + } + + if (i < fr_pool->size) + iser_warn("pool still has %d regions registered\n", + fr_pool->size - i); +} + +/** + * iser_create_ib_conn_res - Queue-Pair (QP) + * + * returns 0 on success, -1 on failure + */ +static int iser_create_ib_conn_res(struct ib_conn *ib_conn) +{ + struct iser_conn *iser_conn = to_iser_conn(ib_conn); + struct iser_device *device; + struct ib_device *ib_dev; + struct ib_qp_init_attr init_attr; + int ret = -ENOMEM; + int index, min_index = 0; + + BUG_ON(ib_conn->device == NULL); + + device = ib_conn->device; + ib_dev = device->ib_device; + + memset(&init_attr, 0, sizeof init_attr); + + mutex_lock(&ig.connlist_mutex); + /* select the CQ with the minimal number of usages */ + for (index = 0; index < device->comps_used; index++) { + if (device->comps[index].active_qps < + device->comps[min_index].active_qps) + min_index = index; + } + ib_conn->comp = &device->comps[min_index]; + ib_conn->comp->active_qps++; + mutex_unlock(&ig.connlist_mutex); + iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); + + init_attr.event_handler = iser_qp_event_callback; + init_attr.qp_context = (void *)ib_conn; + init_attr.send_cq = ib_conn->comp->cq; + init_attr.recv_cq = ib_conn->comp->cq; + init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; + init_attr.cap.max_send_sge = 2; + init_attr.cap.max_recv_sge = 1; + init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_attr.qp_type = IB_QPT_RC; + if (ib_conn->pi_support) { + init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1; + init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS); + } else { + if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) { + init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS); + } else { + init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr); + iser_dbg("device %s supports max_send_wr %d\n", + device->ib_device->name, ib_dev->attrs.max_qp_wr); + } + } + + ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); + if (ret) + goto out_err; + + 
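+ /* rdma_create_qp() attached the new QP to the CM id; cache the pointer for the send/recv paths */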
ib_conn->qp = ib_conn->cma_id->qp; + iser_info("setting conn %p cma_id %p qp %p\n", + ib_conn, ib_conn->cma_id, + ib_conn->cma_id->qp); + return ret; + +out_err: + mutex_lock(&ig.connlist_mutex); + ib_conn->comp->active_qps--; + mutex_unlock(&ig.connlist_mutex); + iser_err("unable to alloc mem or create resource, err %d\n", ret); + + return ret; +} + +/** + * based on the resolved device node GUID see if there already allocated + * device for this device. If there's no such, create one. + */ +static +struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) +{ + struct iser_device *device; + + mutex_lock(&ig.device_list_mutex); + + list_for_each_entry(device, &ig.device_list, ig_list) + /* find if there's a match using the node GUID */ + if (device->ib_device->node_guid == cma_id->device->node_guid) + goto inc_refcnt; + + device = kzalloc(sizeof *device, GFP_KERNEL); + if (device == NULL) + goto out; + + /* assign this device to the device */ + device->ib_device = cma_id->device; + /* init the device and link it into ig device list */ + if (iser_create_device_ib_res(device)) { + kfree(device); + device = NULL; + goto out; + } + list_add(&device->ig_list, &ig.device_list); + +inc_refcnt: + device->refcount++; +out: + mutex_unlock(&ig.device_list_mutex); + return device; +} + +/* if there's no demand for this device, release it */ +static void iser_device_try_release(struct iser_device *device) +{ + mutex_lock(&ig.device_list_mutex); + device->refcount--; + iser_info("device %p refcount %d\n", device, device->refcount); + if (!device->refcount) { + iser_free_device_ib_res(device); + list_del(&device->ig_list); + kfree(device); + } + mutex_unlock(&ig.device_list_mutex); +} + +/** + * Called with state mutex held + **/ +static int iser_conn_state_comp_exch(struct iser_conn *iser_conn, + enum iser_conn_state comp, + enum iser_conn_state exch) +{ + int ret; + + ret = (iser_conn->state == comp); + if (ret) + iser_conn->state = exch; + + return ret; +} + +void iser_release_work(struct work_struct *work) +{ + struct iser_conn *iser_conn; + + iser_conn = container_of(work, struct iser_conn, release_work); + + /* Wait for conn_stop to complete */ + wait_for_completion(&iser_conn->stop_completion); + /* Wait for IB resouces cleanup to complete */ + wait_for_completion(&iser_conn->ib_completion); + + mutex_lock(&iser_conn->state_mutex); + iser_conn->state = ISER_CONN_DOWN; + mutex_unlock(&iser_conn->state_mutex); + + iser_conn_release(iser_conn); +} + +/** + * iser_free_ib_conn_res - release IB related resources + * @iser_conn: iser connection struct + * @destroy: indicator if we need to try to release the + * iser device and memory regoins pool (only iscsi + * shutdown and DEVICE_REMOVAL will use this). + * + * This routine is called with the iser state mutex held + * so the cm_id removal is out of here. It is Safe to + * be invoked multiple times. 
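+ * (Each teardown step below clears its pointer once released, which is what makes repeated invocation safe.)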
+ */ +static void iser_free_ib_conn_res(struct iser_conn *iser_conn, + bool destroy) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + iser_info("freeing conn %p cma_id %p qp %p\n", + iser_conn, ib_conn->cma_id, ib_conn->qp); + + if (ib_conn->qp != NULL) { + mutex_lock(&ig.connlist_mutex); + ib_conn->comp->active_qps--; + mutex_unlock(&ig.connlist_mutex); + rdma_destroy_qp(ib_conn->cma_id); + ib_conn->qp = NULL; + } + + if (destroy) { + if (iser_conn->rx_descs) + iser_free_rx_descriptors(iser_conn); + + if (device != NULL) { + iser_device_try_release(device); + ib_conn->device = NULL; + } + } +} + +/** + * Frees all conn objects and deallocs conn descriptor + */ +void iser_conn_release(struct iser_conn *iser_conn) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + + mutex_lock(&ig.connlist_mutex); + list_del(&iser_conn->conn_list); + mutex_unlock(&ig.connlist_mutex); + + mutex_lock(&iser_conn->state_mutex); + /* In case we endup here without ep_disconnect being invoked. */ + if (iser_conn->state != ISER_CONN_DOWN) { + iser_warn("iser conn %p state %d, expected state down.\n", + iser_conn, iser_conn->state); + iscsi_destroy_endpoint(iser_conn->ep); + iser_conn->state = ISER_CONN_DOWN; + } + /* + * In case we never got to bind stage, we still need to + * release IB resources (which is safe to call more than once). + */ + iser_free_ib_conn_res(iser_conn, true); + mutex_unlock(&iser_conn->state_mutex); + + if (ib_conn->cma_id != NULL) { + rdma_destroy_id(ib_conn->cma_id); + ib_conn->cma_id = NULL; + } + + kfree(iser_conn); +} + +/** + * triggers start of the disconnect procedures and wait for them to be done + * Called with state mutex held + */ +int iser_conn_terminate(struct iser_conn *iser_conn) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + int err = 0; + + /* terminate the iser conn only if the conn state is UP */ + if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, + ISER_CONN_TERMINATING)) + return 0; + + iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state); + + /* suspend queuing of new iscsi commands */ + if (iser_conn->iscsi_conn) + iscsi_suspend_queue(iser_conn->iscsi_conn); + + /* + * In case we didn't already clean up the cma_id (peer initiated + * a disconnection), we need to Cause the CMA to change the QP + * state to ERROR. + */ + if (ib_conn->cma_id) { + err = rdma_disconnect(ib_conn->cma_id); + if (err) + iser_err("Failed to disconnect, conn: 0x%p err %d\n", + iser_conn, err); + + /* block until all flush errors are consumed */ + ib_drain_sq(ib_conn->qp); + } + + return 1; +} + +/** + * Called with state mutex held + **/ +static void iser_connect_error(struct rdma_cm_id *cma_id) +{ + struct iser_conn *iser_conn; + + iser_conn = (struct iser_conn *)cma_id->context; + iser_conn->state = ISER_CONN_TERMINATING; +} + +static void +iser_calc_scsi_params(struct iser_conn *iser_conn, + unsigned int max_sectors) +{ + struct iser_device *device = iser_conn->ib_conn.device; + struct ib_device_attr *attr = &device->ib_device->attrs; + unsigned short sg_tablesize, sup_sg_tablesize; + unsigned short reserved_mr_pages; + + /* + * FRs without SG_GAPS or FMRs can only map up to a (device) page per + * entry, but if the first entry is misaligned we'll end up using two + * entries (head and tail) for a single page worth data, so one + * additional entry is required. 
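+ * SG_GAPS-capable fast-reg MRs have no such alignment constraint, so no extra entry is reserved for them below.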
+ */ + if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) && + (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)) + reserved_mr_pages = 0; + else + reserved_mr_pages = 1; + + sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); + if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) + sup_sg_tablesize = + min_t( + uint, ISCSI_ISER_MAX_SG_TABLESIZE, + attr->max_fast_reg_page_list_len - reserved_mr_pages); + else + sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE; + + iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); + iser_conn->pages_per_mr = + iser_conn->scsi_sg_tablesize + reserved_mr_pages; +} + +/** + * Called with state mutex held + **/ +static void iser_addr_handler(struct rdma_cm_id *cma_id) +{ + struct iser_device *device; + struct iser_conn *iser_conn; + struct ib_conn *ib_conn; + int ret; + + iser_conn = (struct iser_conn *)cma_id->context; + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + ib_conn = &iser_conn->ib_conn; + device = iser_device_find_by_ib_device(cma_id); + if (!device) { + iser_err("device lookup/creation failed\n"); + iser_connect_error(cma_id); + return; + } + + ib_conn->device = device; + + /* connection T10-PI support */ + if (iser_pi_enable) { + if (!(device->ib_device->attrs.device_cap_flags & + IB_DEVICE_SIGNATURE_HANDOVER)) { + iser_warn("T10-PI requested but not supported on %s, " + "continue without T10-PI\n", + ib_conn->device->ib_device->name); + ib_conn->pi_support = false; + } else { + ib_conn->pi_support = true; + } + } + + iser_calc_scsi_params(iser_conn, iser_max_sectors); + + ret = rdma_resolve_route(cma_id, 1000); + if (ret) { + iser_err("resolve route failed: %d\n", ret); + iser_connect_error(cma_id); + return; + } +} + +/** + * Called with state mutex held + **/ +static void iser_route_handler(struct rdma_cm_id *cma_id) +{ + struct rdma_conn_param conn_param; + int ret; + struct iser_cm_hdr req_hdr; + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + ret = iser_create_ib_conn_res(ib_conn); + if (ret) + goto failure; + + memset(&conn_param, 0, sizeof conn_param); + conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom; + conn_param.initiator_depth = 1; + conn_param.retry_count = 7; + conn_param.rnr_retry_count = 6; + + memset(&req_hdr, 0, sizeof(req_hdr)); + req_hdr.flags = ISER_ZBVA_NOT_SUP; + if (!device->remote_inv_sup) + req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP; + conn_param.private_data = (void *)&req_hdr; + conn_param.private_data_len = sizeof(struct iser_cm_hdr); + + ret = rdma_connect(cma_id, &conn_param); + if (ret) { + iser_err("failure connecting: %d\n", ret); + goto failure; + } + + return; +failure: + iser_connect_error(cma_id); +} + +static void iser_connected_handler(struct rdma_cm_id *cma_id, + const void *private_data) +{ + struct iser_conn *iser_conn; + struct ib_qp_attr attr; + struct ib_qp_init_attr init_attr; + + iser_conn = (struct iser_conn *)cma_id->context; + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); + iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); + + if (private_data) { + u8 flags = *(u8 *)private_data; + + iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP); + } + + iser_info("conn %p: negotiated %s invalidation\n", + iser_conn, 
iser_conn->snd_w_inv ? "remote" : "local"); + + iser_conn->state = ISER_CONN_UP; + complete(&iser_conn->up_completion); +} + +static void iser_disconnected_handler(struct rdma_cm_id *cma_id) +{ + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + + if (iser_conn_terminate(iser_conn)) { + if (iser_conn->iscsi_conn) + iscsi_conn_failure(iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + else + iser_err("iscsi_iser connection isn't bound\n"); + } +} + +static void iser_cleanup_handler(struct rdma_cm_id *cma_id, + bool destroy) +{ + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + + /* + * We are not guaranteed that we visited disconnected_handler + * by now, call it here to be safe that we handle CM drep + * and flush errors. + */ + iser_disconnected_handler(cma_id); + iser_free_ib_conn_res(iser_conn, destroy); + complete(&iser_conn->ib_completion); +}; + +static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +{ + struct iser_conn *iser_conn; + int ret = 0; + + iser_conn = (struct iser_conn *)cma_id->context; + iser_info("%s (%d): status %d conn %p id %p\n", + rdma_event_msg(event->event), event->event, + event->status, cma_id->context, cma_id); + + mutex_lock(&iser_conn->state_mutex); + switch (event->event) { + case RDMA_CM_EVENT_ADDR_RESOLVED: + iser_addr_handler(cma_id); + break; + case RDMA_CM_EVENT_ROUTE_RESOLVED: + iser_route_handler(cma_id); + break; + case RDMA_CM_EVENT_ESTABLISHED: + iser_connected_handler(cma_id, event->param.conn.private_data); + break; + case RDMA_CM_EVENT_REJECTED: + iser_info("Connection rejected: %s\n", + rdma_reject_msg(cma_id, event->status)); + /* FALLTHROUGH */ + case RDMA_CM_EVENT_ADDR_ERROR: + case RDMA_CM_EVENT_ROUTE_ERROR: + case RDMA_CM_EVENT_CONNECT_ERROR: + case RDMA_CM_EVENT_UNREACHABLE: + iser_connect_error(cma_id); + break; + case RDMA_CM_EVENT_DISCONNECTED: + case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + iser_cleanup_handler(cma_id, false); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + /* + * we *must* destroy the device as we cannot rely + * on iscsid to be around to initiate error handling. + * also if we are not in state DOWN implicitly destroy + * the cma_id. 
+ */ + iser_cleanup_handler(cma_id, true); + if (iser_conn->state != ISER_CONN_DOWN) { + iser_conn->ib_conn.cma_id = NULL; + ret = 1; + } + break; + default: + iser_err("Unexpected RDMA CM event: %s (%d)\n", + rdma_event_msg(event->event), event->event); + break; + } + mutex_unlock(&iser_conn->state_mutex); + + return ret; +} + +void iser_conn_init(struct iser_conn *iser_conn) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + + iser_conn->state = ISER_CONN_INIT; + init_completion(&iser_conn->stop_completion); + init_completion(&iser_conn->ib_completion); + init_completion(&iser_conn->up_completion); + INIT_LIST_HEAD(&iser_conn->conn_list); + mutex_init(&iser_conn->state_mutex); + + ib_conn->post_recv_buf_count = 0; + ib_conn->reg_cqe.done = iser_reg_comp; +} + + /** + * starts the process of connecting to the target + * sleeps until the connection is established or rejected + */ +int iser_connect(struct iser_conn *iser_conn, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, + int non_blocking) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + int err = 0; + + mutex_lock(&iser_conn->state_mutex); + + sprintf(iser_conn->name, "%pISp", dst_addr); + + iser_info("connecting to: %s\n", iser_conn->name); + + /* the device is known only --after-- address resolution */ + ib_conn->device = NULL; + + iser_conn->state = ISER_CONN_PENDING; + + ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, + (void *)iser_conn, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(ib_conn->cma_id)) { + err = PTR_ERR(ib_conn->cma_id); + iser_err("rdma_create_id failed: %d\n", err); + goto id_failure; + } + + err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000); + if (err) { + iser_err("rdma_resolve_addr failed: %d\n", err); + goto addr_failure; + } + + if (!non_blocking) { + wait_for_completion_interruptible(&iser_conn->up_completion); + + if (iser_conn->state != ISER_CONN_UP) { + err = -EIO; + goto connect_failure; + } + } + mutex_unlock(&iser_conn->state_mutex); + + mutex_lock(&ig.connlist_mutex); + list_add(&iser_conn->conn_list, &ig.connlist); + mutex_unlock(&ig.connlist_mutex); + return 0; + +id_failure: + ib_conn->cma_id = NULL; +addr_failure: + iser_conn->state = ISER_CONN_DOWN; +connect_failure: + mutex_unlock(&iser_conn->state_mutex); + iser_conn_release(iser_conn); + return err; +} + +int iser_post_recvl(struct iser_conn *iser_conn) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_login_desc *desc = &iser_conn->login_desc; + struct ib_recv_wr wr; + int ib_ret; + + desc->sge.addr = desc->rsp_dma; + desc->sge.length = ISER_RX_LOGIN_SIZE; + desc->sge.lkey = ib_conn->device->pd->local_dma_lkey; + + desc->cqe.done = iser_login_rsp; + wr.wr_cqe = &desc->cqe; + wr.sg_list = &desc->sge; + wr.num_sge = 1; + wr.next = NULL; + + ib_conn->post_recv_buf_count++; + ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL); + if (ib_ret) { + iser_err("ib_post_recv failed ret=%d\n", ib_ret); + ib_conn->post_recv_buf_count--; + } + + return ib_ret; +} + +int iser_post_recvm(struct iser_conn *iser_conn, int count) +{ + struct ib_conn *ib_conn = &iser_conn->ib_conn; + unsigned int my_rx_head = iser_conn->rx_desc_head; + struct iser_rx_desc *rx_desc; + struct ib_recv_wr *wr; + int i, ib_ret; + + for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { + rx_desc = &iser_conn->rx_descs[my_rx_head]; + rx_desc->cqe.done = iser_task_rsp; + wr->wr_cqe = &rx_desc->cqe; + wr->sg_list = &rx_desc->rx_sg; + wr->num_sge = 1; + wr->next = wr + 1; + my_rx_head = (my_rx_head + 1) & 
iser_conn->qp_max_recv_dtos_mask; + } + + wr--; + wr->next = NULL; /* mark end of work requests list */ + + ib_conn->post_recv_buf_count += count; + ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL); + if (ib_ret) { + iser_err("ib_post_recv failed ret=%d\n", ib_ret); + ib_conn->post_recv_buf_count -= count; + } else + iser_conn->rx_desc_head = my_rx_head; + + return ib_ret; +} + + +/** + * iser_start_send - Initiate a Send DTO operation + * + * returns 0 on success, -1 on failure + */ +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, + bool signal) +{ + struct ib_send_wr *wr = iser_tx_next_wr(tx_desc); + int ib_ret; + + ib_dma_sync_single_for_device(ib_conn->device->ib_device, + tx_desc->dma_addr, ISER_HEADERS_LEN, + DMA_TO_DEVICE); + + wr->next = NULL; + wr->wr_cqe = &tx_desc->cqe; + wr->sg_list = tx_desc->tx_sg; + wr->num_sge = tx_desc->num_sge; + wr->opcode = IB_WR_SEND; + wr->send_flags = signal ? IB_SEND_SIGNALED : 0; + + ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL); + if (ib_ret) + iser_err("ib_post_send failed, ret:%d opcode:%d\n", + ib_ret, wr->opcode); + + return ib_ret; +} + +u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir, sector_t *sector) +{ + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + struct iser_fr_desc *desc = reg->mem_h; + unsigned long sector_size = iser_task->sc->device->sector_size; + struct ib_mr_status mr_status; + int ret; + + if (desc && desc->pi_ctx->sig_protected) { + desc->pi_ctx->sig_protected = 0; + ret = ib_check_mr_status(desc->pi_ctx->sig_mr, + IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + pr_err("ib_check_mr_status failed, ret %d\n", ret); + /* Not a lot we can do, return ambiguous guard error */ + *sector = 0; + return 0x1; + } + + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + sector_t sector_off = mr_status.sig_err.sig_err_offset; + + sector_div(sector_off, sector_size + 8); + *sector = scsi_get_lba(iser_task->sc) + sector_off; + + pr_err("PI error found type %d at sector %llx " + "expected %x vs actual %x\n", + mr_status.sig_err.err_type, + (unsigned long long)*sector, + mr_status.sig_err.expected, + mr_status.sig_err.actual); + + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + return 0x1; + case IB_SIG_BAD_REFTAG: + return 0x3; + case IB_SIG_BAD_APPTAG: + return 0x2; + } + } + } + + return 0; +} + +void iser_err_comp(struct ib_wc *wc, const char *type) +{ + if (wc->status != IB_WC_WR_FLUSH_ERR) { + struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context); + + iser_err("%s failure: %s (%d) vend_err %#x\n", type, + ib_wc_status_msg(wc->status), wc->status, + wc->vendor_err); + + if (iser_conn->iscsi_conn) + iscsi_conn_failure(iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + } else { + iser_dbg("%s failure: %s (%d)\n", type, + ib_wc_status_msg(wc->status), wc->status); + } +} diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig new file mode 100644 index 000000000..02f9759eb --- /dev/null +++ b/drivers/infiniband/ulp/isert/Kconfig @@ -0,0 +1,5 @@ +config INFINIBAND_ISERT + tristate "iSCSI Extensions for RDMA (iSER) target support" + depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET + ---help--- + Support for iSCSI Extensions for RDMA (iSER) Target on Infiniband fabrics. 
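The failing-sector math in iser_check_task_pi_status() above divides the signature error offset by the size of one protection interval, i.e. the logical block size plus the 8-byte T10 DIF tuple interleaved with the data, and adds the result to the command's starting LBA. A minimal userspace sketch of that arithmetic, assuming 512-byte sectors; pi_err_lba() and the sample numbers are illustrative and not part of the driver:

#include <stdio.h>
#include <stdint.h>

/* Each protection interval on the wire carries sector_size data bytes
 * followed by an 8-byte DIF tuple, so the failing LBA is the start LBA
 * plus the error byte offset divided by (sector_size + 8). */
static uint64_t pi_err_lba(uint64_t start_lba, uint64_t err_byte_offset,
                           uint32_t sector_size)
{
        return start_lba + err_byte_offset / (sector_size + 8);
}

int main(void)
{
        /* hypothetical numbers: an error 2080 bytes into a transfer that
         * starts at LBA 100 with 512-byte sectors falls in interval
         * 2080 / 520 = 4, i.e. LBA 104 */
        printf("failing LBA: %llu\n",
               (unsigned long long)pi_err_lba(100, 2080, 512));
        return 0;
}

The kernel code performs the same division through sector_div()/do_div(), since a plain 64-bit divide is not available on all architectures; the target side repeats the calculation in isert_check_pi_status() further down.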
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile new file mode 100644 index 000000000..c8bf2421f --- /dev/null +++ b/drivers/infiniband/ulp/isert/Makefile @@ -0,0 +1,2 @@ +ccflags-y := -Idrivers/target -Idrivers/target/iscsi +obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c new file mode 100644 index 000000000..f39670c5c --- /dev/null +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -0,0 +1,2735 @@ +/******************************************************************************* + * This file contains iSCSI extentions for RDMA (iSER) Verbs + * + * (c) Copyright 2013 Datera, Inc. + * + * Nicholas A. Bellinger <nab@linux-iscsi.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + ****************************************************************************/ + +#include <linux/string.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> +#include <linux/semaphore.h> + +#include "ib_isert.h" + +#define ISERT_MAX_CONN 8 +#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) +#define ISER_MAX_TX_CQ_LEN \ + ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN) +#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ + ISERT_MAX_CONN) + +static int isert_debug_level; +module_param_named(debug_level, isert_debug_level, int, 0644); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); + +static DEFINE_MUTEX(device_list_mutex); +static LIST_HEAD(device_list); +static struct workqueue_struct *isert_comp_wq; +static struct workqueue_struct *isert_release_wq; + +static int +isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); +static int +isert_login_post_recv(struct isert_conn *isert_conn); +static int +isert_rdma_accept(struct isert_conn *isert_conn); +struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); + +static void isert_release_work(struct work_struct *work); +static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); +static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); +static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); +static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); + +static inline bool +isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) +{ + return (conn->pi_support && + cmd->prot_op != TARGET_PROT_NORMAL); +} + + +static void +isert_qp_event_callback(struct ib_event *e, void *context) +{ + struct isert_conn *isert_conn = context; + + isert_err("%s (%d): conn %p\n", + ib_event_msg(e->event), e->event, isert_conn); + + switch (e->event) { + case IB_EVENT_COMM_EST: + rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); + break; + case IB_EVENT_QP_LAST_WQE_REACHED: + isert_warn("Reached TX 
IB_EVENT_QP_LAST_WQE_REACHED\n"); + break; + default: + break; + } +} + +static struct isert_comp * +isert_comp_get(struct isert_conn *isert_conn) +{ + struct isert_device *device = isert_conn->device; + struct isert_comp *comp; + int i, min = 0; + + mutex_lock(&device_list_mutex); + for (i = 0; i < device->comps_used; i++) + if (device->comps[i].active_qps < + device->comps[min].active_qps) + min = i; + comp = &device->comps[min]; + comp->active_qps++; + mutex_unlock(&device_list_mutex); + + isert_info("conn %p, using comp %p min_index: %d\n", + isert_conn, comp, min); + + return comp; +} + +static void +isert_comp_put(struct isert_comp *comp) +{ + mutex_lock(&device_list_mutex); + comp->active_qps--; + mutex_unlock(&device_list_mutex); +} + +static struct ib_qp * +isert_create_qp(struct isert_conn *isert_conn, + struct isert_comp *comp, + struct rdma_cm_id *cma_id) +{ + struct isert_device *device = isert_conn->device; + struct ib_qp_init_attr attr; + int ret; + + memset(&attr, 0, sizeof(struct ib_qp_init_attr)); + attr.event_handler = isert_qp_event_callback; + attr.qp_context = isert_conn; + attr.send_cq = comp->cq; + attr.recv_cq = comp->cq; + attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1; + attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; + attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX; + attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge; + attr.cap.max_recv_sge = 1; + attr.sq_sig_type = IB_SIGNAL_REQ_WR; + attr.qp_type = IB_QPT_RC; + if (device->pi_capable) + attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; + + ret = rdma_create_qp(cma_id, device->pd, &attr); + if (ret) { + isert_err("rdma_create_qp failed for cma_id %d\n", ret); + return ERR_PTR(ret); + } + + return cma_id->qp; +} + +static int +isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) +{ + struct isert_comp *comp; + int ret; + + comp = isert_comp_get(isert_conn); + isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); + if (IS_ERR(isert_conn->qp)) { + ret = PTR_ERR(isert_conn->qp); + goto err; + } + + return 0; +err: + isert_comp_put(comp); + return ret; +} + +static int +isert_alloc_rx_descriptors(struct isert_conn *isert_conn) +{ + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + struct iser_rx_desc *rx_desc; + struct ib_sge *rx_sg; + u64 dma_addr; + int i, j; + + isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS, + sizeof(struct iser_rx_desc), + GFP_KERNEL); + if (!isert_conn->rx_descs) + return -ENOMEM; + + rx_desc = isert_conn->rx_descs; + + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { + dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ib_dev, dma_addr)) + goto dma_map_fail; + + rx_desc->dma_addr = dma_addr; + + rx_sg = &rx_desc->rx_sg; + rx_sg->addr = rx_desc->dma_addr; + rx_sg->length = ISER_RX_PAYLOAD_SIZE; + rx_sg->lkey = device->pd->local_dma_lkey; + rx_desc->rx_cqe.done = isert_recv_done; + } + + return 0; + +dma_map_fail: + rx_desc = isert_conn->rx_descs; + for (j = 0; j < i; j++, rx_desc++) { + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + } + kfree(isert_conn->rx_descs); + isert_conn->rx_descs = NULL; + isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); + return -ENOMEM; +} + +static void +isert_free_rx_descriptors(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->device->ib_device; + struct iser_rx_desc *rx_desc; + int i; + + if 
(!isert_conn->rx_descs) + return; + + rx_desc = isert_conn->rx_descs; + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + } + + kfree(isert_conn->rx_descs); + isert_conn->rx_descs = NULL; +} + +static void +isert_free_comps(struct isert_device *device) +{ + int i; + + for (i = 0; i < device->comps_used; i++) { + struct isert_comp *comp = &device->comps[i]; + + if (comp->cq) + ib_free_cq(comp->cq); + } + kfree(device->comps); +} + +static int +isert_alloc_comps(struct isert_device *device) +{ + int i, max_cqe, ret = 0; + + device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), + device->ib_device->num_comp_vectors)); + + isert_info("Using %d CQs, %s supports %d vectors support " + "pi_capable %d\n", + device->comps_used, device->ib_device->name, + device->ib_device->num_comp_vectors, + device->pi_capable); + + device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), + GFP_KERNEL); + if (!device->comps) + return -ENOMEM; + + max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe); + + for (i = 0; i < device->comps_used; i++) { + struct isert_comp *comp = &device->comps[i]; + + comp->device = device; + comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i, + IB_POLL_WORKQUEUE); + if (IS_ERR(comp->cq)) { + isert_err("Unable to allocate cq\n"); + ret = PTR_ERR(comp->cq); + comp->cq = NULL; + goto out_cq; + } + } + + return 0; +out_cq: + isert_free_comps(device); + return ret; +} + +static int +isert_create_device_ib_res(struct isert_device *device) +{ + struct ib_device *ib_dev = device->ib_device; + int ret; + + isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n", + ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); + isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); + + ret = isert_alloc_comps(device); + if (ret) + goto out; + + device->pd = ib_alloc_pd(ib_dev, 0); + if (IS_ERR(device->pd)) { + ret = PTR_ERR(device->pd); + isert_err("failed to allocate pd, device %p, ret=%d\n", + device, ret); + goto out_cq; + } + + /* Check signature cap */ + device->pi_capable = ib_dev->attrs.device_cap_flags & + IB_DEVICE_SIGNATURE_HANDOVER ? 
true : false; + + return 0; + +out_cq: + isert_free_comps(device); +out: + if (ret > 0) + ret = -EINVAL; + return ret; +} + +static void +isert_free_device_ib_res(struct isert_device *device) +{ + isert_info("device %p\n", device); + + ib_dealloc_pd(device->pd); + isert_free_comps(device); +} + +static void +isert_device_put(struct isert_device *device) +{ + mutex_lock(&device_list_mutex); + device->refcount--; + isert_info("device %p refcount %d\n", device, device->refcount); + if (!device->refcount) { + isert_free_device_ib_res(device); + list_del(&device->dev_node); + kfree(device); + } + mutex_unlock(&device_list_mutex); +} + +static struct isert_device * +isert_device_get(struct rdma_cm_id *cma_id) +{ + struct isert_device *device; + int ret; + + mutex_lock(&device_list_mutex); + list_for_each_entry(device, &device_list, dev_node) { + if (device->ib_device->node_guid == cma_id->device->node_guid) { + device->refcount++; + isert_info("Found iser device %p refcount %d\n", + device, device->refcount); + mutex_unlock(&device_list_mutex); + return device; + } + } + + device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); + if (!device) { + mutex_unlock(&device_list_mutex); + return ERR_PTR(-ENOMEM); + } + + INIT_LIST_HEAD(&device->dev_node); + + device->ib_device = cma_id->device; + ret = isert_create_device_ib_res(device); + if (ret) { + kfree(device); + mutex_unlock(&device_list_mutex); + return ERR_PTR(ret); + } + + device->refcount++; + list_add_tail(&device->dev_node, &device_list); + isert_info("Created a new iser device %p refcount %d\n", + device, device->refcount); + mutex_unlock(&device_list_mutex); + + return device; +} + +static void +isert_init_conn(struct isert_conn *isert_conn) +{ + isert_conn->state = ISER_CONN_INIT; + INIT_LIST_HEAD(&isert_conn->node); + init_completion(&isert_conn->login_comp); + init_completion(&isert_conn->login_req_comp); + init_waitqueue_head(&isert_conn->rem_wait); + kref_init(&isert_conn->kref); + mutex_init(&isert_conn->mutex); + INIT_WORK(&isert_conn->release_work, isert_release_work); +} + +static void +isert_free_login_buf(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->device->ib_device; + + ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, + ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); + kfree(isert_conn->login_rsp_buf); + + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, + ISER_RX_PAYLOAD_SIZE, + DMA_FROM_DEVICE); + kfree(isert_conn->login_req_buf); +} + +static int +isert_alloc_login_buf(struct isert_conn *isert_conn, + struct ib_device *ib_dev) +{ + int ret; + + isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), + GFP_KERNEL); + if (!isert_conn->login_req_buf) + return -ENOMEM; + + isert_conn->login_req_dma = ib_dma_map_single(ib_dev, + isert_conn->login_req_buf, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); + if (ret) { + isert_err("login_req_dma mapping error: %d\n", ret); + isert_conn->login_req_dma = 0; + goto out_free_login_req_buf; + } + + isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); + if (!isert_conn->login_rsp_buf) { + ret = -ENOMEM; + goto out_unmap_login_req_buf; + } + + isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, + isert_conn->login_rsp_buf, + ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); + if (ret) { + isert_err("login_rsp_dma mapping error: %d\n", ret); + isert_conn->login_rsp_dma = 0; + goto out_free_login_rsp_buf; + } 
+ + return 0; + +out_free_login_rsp_buf: + kfree(isert_conn->login_rsp_buf); +out_unmap_login_req_buf: + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); +out_free_login_req_buf: + kfree(isert_conn->login_req_buf); + return ret; +} + +static void +isert_set_nego_params(struct isert_conn *isert_conn, + struct rdma_conn_param *param) +{ + struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs; + + /* Set max inflight RDMA READ requests */ + isert_conn->initiator_depth = min_t(u8, param->initiator_depth, + attr->max_qp_init_rd_atom); + isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); + + if (param->private_data) { + u8 flags = *(u8 *)param->private_data; + + /* + * use remote invalidation if the both initiator + * and the HCA support it + */ + isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) && + (attr->device_cap_flags & + IB_DEVICE_MEM_MGT_EXTENSIONS); + if (isert_conn->snd_w_inv) + isert_info("Using remote invalidation\n"); + } +} + +static int +isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +{ + struct isert_np *isert_np = cma_id->context; + struct iscsi_np *np = isert_np->np; + struct isert_conn *isert_conn; + struct isert_device *device; + int ret = 0; + + spin_lock_bh(&np->np_thread_lock); + if (!np->enabled) { + spin_unlock_bh(&np->np_thread_lock); + isert_dbg("iscsi_np is not enabled, reject connect request\n"); + return rdma_reject(cma_id, NULL, 0); + } + spin_unlock_bh(&np->np_thread_lock); + + isert_dbg("cma_id: %p, portal: %p\n", + cma_id, cma_id->context); + + isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); + if (!isert_conn) + return -ENOMEM; + + isert_init_conn(isert_conn); + isert_conn->cm_id = cma_id; + + ret = isert_alloc_login_buf(isert_conn, cma_id->device); + if (ret) + goto out; + + device = isert_device_get(cma_id); + if (IS_ERR(device)) { + ret = PTR_ERR(device); + goto out_rsp_dma_map; + } + isert_conn->device = device; + + isert_set_nego_params(isert_conn, &event->param.conn); + + ret = isert_conn_setup_qp(isert_conn, cma_id); + if (ret) + goto out_conn_dev; + + ret = isert_login_post_recv(isert_conn); + if (ret) + goto out_conn_dev; + + ret = isert_rdma_accept(isert_conn); + if (ret) + goto out_conn_dev; + + mutex_lock(&isert_np->mutex); + list_add_tail(&isert_conn->node, &isert_np->accepted); + mutex_unlock(&isert_np->mutex); + + return 0; + +out_conn_dev: + isert_device_put(device); +out_rsp_dma_map: + isert_free_login_buf(isert_conn); +out: + kfree(isert_conn); + rdma_reject(cma_id, NULL, 0); + return ret; +} + +static void +isert_connect_release(struct isert_conn *isert_conn) +{ + struct isert_device *device = isert_conn->device; + + isert_dbg("conn %p\n", isert_conn); + + BUG_ON(!device); + + isert_free_rx_descriptors(isert_conn); + if (isert_conn->cm_id && + !isert_conn->dev_removed) + rdma_destroy_id(isert_conn->cm_id); + + if (isert_conn->qp) { + struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; + + isert_comp_put(comp); + ib_destroy_qp(isert_conn->qp); + } + + if (isert_conn->login_req_buf) + isert_free_login_buf(isert_conn); + + isert_device_put(device); + + if (isert_conn->dev_removed) + wake_up_interruptible(&isert_conn->rem_wait); + else + kfree(isert_conn); +} + +static void +isert_connected_handler(struct rdma_cm_id *cma_id) +{ + struct isert_conn *isert_conn = cma_id->qp->qp_context; + struct isert_np *isert_np = cma_id->context; + + isert_info("conn %p\n", isert_conn); + + 
mutex_lock(&isert_conn->mutex); + isert_conn->state = ISER_CONN_UP; + kref_get(&isert_conn->kref); + mutex_unlock(&isert_conn->mutex); + + mutex_lock(&isert_np->mutex); + list_move_tail(&isert_conn->node, &isert_np->pending); + mutex_unlock(&isert_np->mutex); + + isert_info("np %p: Allow accept_np to continue\n", isert_np); + up(&isert_np->sem); +} + +static void +isert_release_kref(struct kref *kref) +{ + struct isert_conn *isert_conn = container_of(kref, + struct isert_conn, kref); + + isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, + current->pid); + + isert_connect_release(isert_conn); +} + +static void +isert_put_conn(struct isert_conn *isert_conn) +{ + kref_put(&isert_conn->kref, isert_release_kref); +} + +static void +isert_handle_unbound_conn(struct isert_conn *isert_conn) +{ + struct isert_np *isert_np = isert_conn->cm_id->context; + + mutex_lock(&isert_np->mutex); + if (!list_empty(&isert_conn->node)) { + /* + * This means iscsi doesn't know this connection + * so schedule a cleanup ourselves + */ + list_del_init(&isert_conn->node); + isert_put_conn(isert_conn); + queue_work(isert_release_wq, &isert_conn->release_work); + } + mutex_unlock(&isert_np->mutex); +} + +/** + * isert_conn_terminate() - Initiate connection termination + * @isert_conn: isert connection struct + * + * Notes: + * In case the connection state is BOUND, move state + * to TEMINATING and start teardown sequence (rdma_disconnect). + * In case the connection state is UP, complete flush as well. + * + * This routine must be called with mutex held. Thus it is + * safe to call multiple times. + */ +static void +isert_conn_terminate(struct isert_conn *isert_conn) +{ + int err; + + if (isert_conn->state >= ISER_CONN_TERMINATING) + return; + + isert_info("Terminating conn %p state %d\n", + isert_conn, isert_conn->state); + isert_conn->state = ISER_CONN_TERMINATING; + err = rdma_disconnect(isert_conn->cm_id); + if (err) + isert_warn("Failed rdma_disconnect isert_conn %p\n", + isert_conn); +} + +static int +isert_np_cma_handler(struct isert_np *isert_np, + enum rdma_cm_event_type event) +{ + isert_dbg("%s (%d): isert np %p\n", + rdma_event_msg(event), event, isert_np); + + switch (event) { + case RDMA_CM_EVENT_DEVICE_REMOVAL: + isert_np->cm_id = NULL; + break; + case RDMA_CM_EVENT_ADDR_CHANGE: + isert_np->cm_id = isert_setup_id(isert_np); + if (IS_ERR(isert_np->cm_id)) { + isert_err("isert np %p setup id failed: %ld\n", + isert_np, PTR_ERR(isert_np->cm_id)); + isert_np->cm_id = NULL; + } + break; + default: + isert_err("isert np %p Unexpected event %d\n", + isert_np, event); + } + + return -1; +} + +static int +isert_disconnected_handler(struct rdma_cm_id *cma_id, + enum rdma_cm_event_type event) +{ + struct isert_conn *isert_conn = cma_id->qp->qp_context; + + mutex_lock(&isert_conn->mutex); + switch (isert_conn->state) { + case ISER_CONN_TERMINATING: + break; + case ISER_CONN_UP: + isert_conn_terminate(isert_conn); + ib_drain_qp(isert_conn->qp); + isert_handle_unbound_conn(isert_conn); + break; + case ISER_CONN_BOUND: + case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + break; + default: + isert_warn("conn %p terminating in state %d\n", + isert_conn, isert_conn->state); + } + mutex_unlock(&isert_conn->mutex); + + return 0; +} + +static int +isert_connect_error(struct rdma_cm_id *cma_id) +{ + struct isert_conn *isert_conn = cma_id->qp->qp_context; + + ib_drain_qp(isert_conn->qp); + list_del_init(&isert_conn->node); + isert_conn->cm_id = NULL; + 
isert_put_conn(isert_conn); + + return -1; +} + +static int +isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +{ + struct isert_np *isert_np = cma_id->context; + struct isert_conn *isert_conn; + int ret = 0; + + isert_info("%s (%d): status %d id %p np %p\n", + rdma_event_msg(event->event), event->event, + event->status, cma_id, cma_id->context); + + if (isert_np->cm_id == cma_id) + return isert_np_cma_handler(cma_id->context, event->event); + + switch (event->event) { + case RDMA_CM_EVENT_CONNECT_REQUEST: + ret = isert_connect_request(cma_id, event); + if (ret) + isert_err("failed handle connect request %d\n", ret); + break; + case RDMA_CM_EVENT_ESTABLISHED: + isert_connected_handler(cma_id); + break; + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ + ret = isert_disconnected_handler(cma_id, event->event); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + isert_conn = cma_id->qp->qp_context; + isert_conn->dev_removed = true; + isert_disconnected_handler(cma_id, event->event); + wait_event_interruptible(isert_conn->rem_wait, + isert_conn->state == ISER_CONN_DOWN); + kfree(isert_conn); + /* + * return non-zero from the callback to destroy + * the rdma cm id + */ + return 1; + case RDMA_CM_EVENT_REJECTED: + isert_info("Connection rejected: %s\n", + rdma_reject_msg(cma_id, event->status)); + /* fall through */ + case RDMA_CM_EVENT_UNREACHABLE: + case RDMA_CM_EVENT_CONNECT_ERROR: + ret = isert_connect_error(cma_id); + break; + default: + isert_err("Unhandled RDMA CMA event: %d\n", event->event); + break; + } + + return ret; +} + +static int +isert_post_recvm(struct isert_conn *isert_conn, u32 count) +{ + struct ib_recv_wr *rx_wr; + int i, ret; + struct iser_rx_desc *rx_desc; + + for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { + rx_desc = &isert_conn->rx_descs[i]; + + rx_wr->wr_cqe = &rx_desc->rx_cqe; + rx_wr->sg_list = &rx_desc->rx_sg; + rx_wr->num_sge = 1; + rx_wr->next = rx_wr + 1; + rx_desc->in_use = false; + } + rx_wr--; + rx_wr->next = NULL; /* mark end of work requests list */ + + ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL); + if (ret) + isert_err("ib_post_recv() failed with ret: %d\n", ret); + + return ret; +} + +static int +isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) +{ + struct ib_recv_wr rx_wr; + int ret; + + if (!rx_desc->in_use) { + /* + * if the descriptor is not in-use we already reposted it + * for recv, so just silently return + */ + return 0; + } + + rx_desc->in_use = false; + rx_wr.wr_cqe = &rx_desc->rx_cqe; + rx_wr.sg_list = &rx_desc->rx_sg; + rx_wr.num_sge = 1; + rx_wr.next = NULL; + + ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); + if (ret) + isert_err("ib_post_recv() failed with ret: %d\n", ret); + + return ret; +} + +static int +isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) +{ + struct ib_device *ib_dev = isert_conn->cm_id->device; + struct ib_send_wr send_wr; + int ret; + + ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + + tx_desc->tx_cqe.done = isert_login_send_done; + + send_wr.next = NULL; + send_wr.wr_cqe = &tx_desc->tx_cqe; + send_wr.sg_list = tx_desc->tx_sg; + send_wr.num_sge = tx_desc->num_sge; + send_wr.opcode = IB_WR_SEND; + send_wr.send_flags = IB_SEND_SIGNALED; + + ret = ib_post_send(isert_conn->qp, &send_wr, NULL); + if (ret) + isert_err("ib_post_send() failed, ret: %d\n", ret); + + 
return ret; +} + +static void +__isert_create_send_desc(struct isert_device *device, + struct iser_tx_desc *tx_desc) +{ + + memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); + tx_desc->iser_header.flags = ISCSI_CTRL; + + tx_desc->num_sge = 1; + + if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) { + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; + isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); + } +} + +static void +isert_create_send_desc(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, + struct iser_tx_desc *tx_desc) +{ + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + + __isert_create_send_desc(device, tx_desc); +} + +static int +isert_init_tx_hdrs(struct isert_conn *isert_conn, + struct iser_tx_desc *tx_desc) +{ + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + u64 dma_addr; + + dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + if (ib_dma_mapping_error(ib_dev, dma_addr)) { + isert_err("ib_dma_mapping_error() failed\n"); + return -ENOMEM; + } + + tx_desc->dma_addr = dma_addr; + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; + tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey; + + isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", + tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, + tx_desc->tx_sg[0].lkey); + + return 0; +} + +static void +isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + struct ib_send_wr *send_wr) +{ + struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; + + tx_desc->tx_cqe.done = isert_send_done; + send_wr->wr_cqe = &tx_desc->tx_cqe; + + if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) { + send_wr->opcode = IB_WR_SEND_WITH_INV; + send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; + } else { + send_wr->opcode = IB_WR_SEND; + } + + send_wr->sg_list = &tx_desc->tx_sg[0]; + send_wr->num_sge = isert_cmd->tx_desc.num_sge; + send_wr->send_flags = IB_SEND_SIGNALED; +} + +static int +isert_login_post_recv(struct isert_conn *isert_conn) +{ + struct ib_recv_wr rx_wr; + struct ib_sge sge; + int ret; + + memset(&sge, 0, sizeof(struct ib_sge)); + sge.addr = isert_conn->login_req_dma; + sge.length = ISER_RX_PAYLOAD_SIZE; + sge.lkey = isert_conn->device->pd->local_dma_lkey; + + isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", + sge.addr, sge.length, sge.lkey); + + isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; + + memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); + rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; + rx_wr.sg_list = &sge; + rx_wr.num_sge = 1; + + ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL); + if (ret) + isert_err("ib_post_recv() failed: %d\n", ret); + + return ret; +} + +static int +isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, + u32 length) +{ + struct isert_conn *isert_conn = conn->context; + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; + int ret; + + __isert_create_send_desc(device, tx_desc); + + memcpy(&tx_desc->iscsi_header, &login->rsp[0], + sizeof(struct iscsi_hdr)); + + isert_init_tx_hdrs(isert_conn, tx_desc); + + if (length > 0) { + struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; + + ib_dma_sync_single_for_cpu(ib_dev, 
isert_conn->login_rsp_dma, + length, DMA_TO_DEVICE); + + memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); + + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, + length, DMA_TO_DEVICE); + + tx_dsg->addr = isert_conn->login_rsp_dma; + tx_dsg->length = length; + tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey; + tx_desc->num_sge = 2; + } + if (!login->login_failed) { + if (login->login_complete) { + ret = isert_alloc_rx_descriptors(isert_conn); + if (ret) + return ret; + + ret = isert_post_recvm(isert_conn, + ISERT_QP_MAX_RECV_DTOS); + if (ret) + return ret; + + /* Now we are in FULL_FEATURE phase */ + mutex_lock(&isert_conn->mutex); + isert_conn->state = ISER_CONN_FULL_FEATURE; + mutex_unlock(&isert_conn->mutex); + goto post_send; + } + + ret = isert_login_post_recv(isert_conn); + if (ret) + return ret; + } +post_send: + ret = isert_login_post_send(isert_conn, tx_desc); + if (ret) + return ret; + + return 0; +} + +static void +isert_rx_login_req(struct isert_conn *isert_conn) +{ + struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; + int rx_buflen = isert_conn->login_req_len; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_login *login = conn->conn_login; + int size; + + isert_info("conn %p\n", isert_conn); + + WARN_ON_ONCE(!login); + + if (login->first_request) { + struct iscsi_login_req *login_req = + (struct iscsi_login_req *)&rx_desc->iscsi_header; + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. + */ + login->leading_connection = (!login_req->tsih) ? 1 : 0; + login->current_stage = + (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) + >> 2; + login->version_min = login_req->min_version; + login->version_max = login_req->max_version; + memcpy(login->isid, login_req->isid, 6); + login->cmd_sn = be32_to_cpu(login_req->cmdsn); + login->init_task_tag = login_req->itt; + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); + login->cid = be16_to_cpu(login_req->cid); + login->tsih = be16_to_cpu(login_req->tsih); + } + + memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); + + size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); + isert_dbg("Using login payload size: %d, rx_buflen: %d " + "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, + MAX_KEY_VALUE_PAIRS); + memcpy(login->req_buf, &rx_desc->data[0], size); + + if (login->first_request) { + complete(&isert_conn->login_comp); + return; + } + schedule_delayed_work(&conn->login_work, 0); +} + +static struct iscsi_cmd +*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc) +{ + struct isert_conn *isert_conn = conn->context; + struct isert_cmd *isert_cmd; + struct iscsi_cmd *cmd; + + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) { + isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); + return NULL; + } + isert_cmd = iscsit_priv_cmd(cmd); + isert_cmd->conn = isert_conn; + isert_cmd->iscsi_cmd = cmd; + isert_cmd->rx_desc = rx_desc; + + return cmd; +} + +static int +isert_handle_scsi_cmd(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, + struct iser_rx_desc *rx_desc, unsigned char *buf) +{ + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; + int imm_data, imm_data_len, unsol_data, sg_nents, rc; + bool dump_payload = false; + unsigned int data_len; + + rc = iscsit_setup_scsi_cmd(conn, cmd, buf); + if (rc < 0) + return rc; + + imm_data = cmd->immediate_data; + imm_data_len = cmd->first_burst_len; + 
unsol_data = cmd->unsolicited_data; + data_len = cmd->se_cmd.data_length; + + if (imm_data && imm_data_len == data_len) + cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); + if (rc < 0) { + return 0; + } else if (rc > 0) { + dump_payload = true; + goto sequence_cmd; + } + + if (!imm_data) + return 0; + + if (imm_data_len != data_len) { + sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); + sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, + &rx_desc->data[0], imm_data_len); + isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", + sg_nents, imm_data_len); + } else { + sg_init_table(&isert_cmd->sg, 1); + cmd->se_cmd.t_data_sg = &isert_cmd->sg; + cmd->se_cmd.t_data_nents = 1; + sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + isert_dbg("Transfer Immediate imm_data_len: %d\n", + imm_data_len); + } + + cmd->write_data_done += imm_data_len; + + if (cmd->write_data_done == cmd->se_cmd.data_length) { + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + } + +sequence_cmd: + rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); + + if (!rc && dump_payload == false && unsol_data) + iscsit_set_unsoliticed_dataout(cmd); + else if (dump_payload && imm_data) + target_put_sess_cmd(&cmd->se_cmd); + + return 0; +} + +static int +isert_handle_iscsi_dataout(struct isert_conn *isert_conn, + struct iser_rx_desc *rx_desc, unsigned char *buf) +{ + struct scatterlist *sg_start; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_cmd *cmd = NULL; + struct iscsi_data *hdr = (struct iscsi_data *)buf; + u32 unsol_data_len = ntoh24(hdr->dlength); + int rc, sg_nents, sg_off, page_off; + + rc = iscsit_check_dataout_hdr(conn, buf, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + /* + * FIXME: Unexpected unsolicited_data out + */ + if (!cmd->unsolicited_data) { + isert_err("Received unexpected solicited data payload\n"); + dump_stack(); + return -1; + } + + isert_dbg("Unsolicited DataOut unsol_data_len: %u, " + "write_data_done: %u, data_length: %u\n", + unsol_data_len, cmd->write_data_done, + cmd->se_cmd.data_length); + + sg_off = cmd->write_data_done / PAGE_SIZE; + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; + sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); + page_off = cmd->write_data_done % PAGE_SIZE; + /* + * FIXME: Non page-aligned unsolicited_data out + */ + if (page_off) { + isert_err("unexpected non-page aligned data payload\n"); + dump_stack(); + return -1; + } + isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " + "sg_nents: %u from %p %u\n", sg_start, sg_off, + sg_nents, &rx_desc->data[0], unsol_data_len); + + sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], + unsol_data_len); + + rc = iscsit_check_dataout_payload(cmd, hdr, false); + if (rc < 0) + return rc; + + /* + * multiple data-outs on the same command can arrive - + * so post the buffer before hand + */ + rc = isert_post_recv(isert_conn, rx_desc); + if (rc) { + isert_err("ib_post_recv failed with %d\n", rc); + return rc; + } + return 0; +} + +static int +isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, + unsigned char *buf) +{ + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; + int rc; + + rc = iscsit_setup_nop_out(conn, cmd, hdr); + if (rc < 0) + return rc; + /* + * 
FIXME: Add support for NOPOUT payload using unsolicited RDMA payload + */ + + return iscsit_process_nop_out(conn, cmd, hdr); +} + +static int +isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, + struct iscsi_text *hdr) +{ + struct iscsi_conn *conn = isert_conn->conn; + u32 payload_length = ntoh24(hdr->dlength); + int rc; + unsigned char *text_in = NULL; + + rc = iscsit_setup_text_cmd(conn, cmd, hdr); + if (rc < 0) + return rc; + + if (payload_length) { + text_in = kzalloc(payload_length, GFP_KERNEL); + if (!text_in) + return -ENOMEM; + } + cmd->text_in_ptr = text_in; + + memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); + + return iscsit_process_text_cmd(conn, cmd, hdr); +} + +static int +isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, + uint32_t read_stag, uint64_t read_va, + uint32_t write_stag, uint64_t write_va) +{ + struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_cmd *cmd; + struct isert_cmd *isert_cmd; + int ret = -EINVAL; + u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); + + if (conn->sess->sess_ops->SessionType && + (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { + isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," + " ignoring\n", opcode); + return 0; + } + + switch (opcode) { + case ISCSI_OP_SCSI_CMD: + cmd = isert_allocate_cmd(conn, rx_desc); + if (!cmd) + break; + + isert_cmd = iscsit_priv_cmd(cmd); + isert_cmd->read_stag = read_stag; + isert_cmd->read_va = read_va; + isert_cmd->write_stag = write_stag; + isert_cmd->write_va = write_va; + isert_cmd->inv_rkey = read_stag ? read_stag : write_stag; + + ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd, + rx_desc, (unsigned char *)hdr); + break; + case ISCSI_OP_NOOP_OUT: + cmd = isert_allocate_cmd(conn, rx_desc); + if (!cmd) + break; + + isert_cmd = iscsit_priv_cmd(cmd); + ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd, + rx_desc, (unsigned char *)hdr); + break; + case ISCSI_OP_SCSI_DATA_OUT: + ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, + (unsigned char *)hdr); + break; + case ISCSI_OP_SCSI_TMFUNC: + cmd = isert_allocate_cmd(conn, rx_desc); + if (!cmd) + break; + + ret = iscsit_handle_task_mgt_cmd(conn, cmd, + (unsigned char *)hdr); + break; + case ISCSI_OP_LOGOUT: + cmd = isert_allocate_cmd(conn, rx_desc); + if (!cmd) + break; + + ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); + break; + case ISCSI_OP_TEXT: + if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) + cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); + else + cmd = isert_allocate_cmd(conn, rx_desc); + + if (!cmd) + break; + + isert_cmd = iscsit_priv_cmd(cmd); + ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, + rx_desc, (struct iscsi_text *)hdr); + break; + default: + isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); + dump_stack(); + break; + } + + return ret; +} + +static void +isert_print_wc(struct ib_wc *wc, const char *type) +{ + if (wc->status != IB_WC_WR_FLUSH_ERR) + isert_err("%s failure: %s (%d) vend_err %x\n", type, + ib_wc_status_msg(wc->status), wc->status, + wc->vendor_err); + else + isert_dbg("%s failure: %s (%d)\n", type, + ib_wc_status_msg(wc->status), wc->status); +} + +static void +isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct isert_conn *isert_conn = wc->qp->qp_context; + struct ib_device *ib_dev = isert_conn->cm_id->device; + struct iser_rx_desc *rx_desc = 
cqe_to_rx_desc(wc->wr_cqe); + struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; + uint64_t read_va = 0, write_va = 0; + uint32_t read_stag = 0, write_stag = 0; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + isert_print_wc(wc, "recv"); + if (wc->status != IB_WC_WR_FLUSH_ERR) + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + return; + } + + rx_desc->in_use = true; + + ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + + isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", + rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, + (int)(wc->byte_len - ISER_HEADERS_LEN)); + + switch (iser_ctrl->flags & 0xF0) { + case ISCSI_CTRL: + if (iser_ctrl->flags & ISER_RSV) { + read_stag = be32_to_cpu(iser_ctrl->read_stag); + read_va = be64_to_cpu(iser_ctrl->read_va); + isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", + read_stag, (unsigned long long)read_va); + } + if (iser_ctrl->flags & ISER_WSV) { + write_stag = be32_to_cpu(iser_ctrl->write_stag); + write_va = be64_to_cpu(iser_ctrl->write_va); + isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", + write_stag, (unsigned long long)write_va); + } + + isert_dbg("ISER ISCSI_CTRL PDU\n"); + break; + case ISER_HELLO: + isert_err("iSER Hello message\n"); + break; + default: + isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags); + break; + } + + isert_rx_opcode(isert_conn, rx_desc, + read_stag, read_va, write_stag, write_va); + + ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); +} + +static void +isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct isert_conn *isert_conn = wc->qp->qp_context; + struct ib_device *ib_dev = isert_conn->device->ib_device; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + isert_print_wc(wc, "login recv"); + return; + } + + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + + isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; + + if (isert_conn->conn) { + struct iscsi_login *login = isert_conn->conn->conn_login; + + if (login && !login->first_request) + isert_rx_login_req(isert_conn); + } + + mutex_lock(&isert_conn->mutex); + complete(&isert_conn->login_req_comp); + mutex_unlock(&isert_conn->mutex); + + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); +} + +static void +isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn) +{ + struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; + enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); + + if (!cmd->rw.nr_ops) + return; + + if (isert_prot_cmd(conn, se_cmd)) { + rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp, + conn->cm_id->port_num, se_cmd->t_data_sg, + se_cmd->t_data_nents, se_cmd->t_prot_sg, + se_cmd->t_prot_nents, dir); + } else { + rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num, + se_cmd->t_data_sg, se_cmd->t_data_nents, dir); + } + + cmd->rw.nr_ops = 0; +} + +static void +isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) +{ + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct isert_conn *isert_conn = isert_cmd->conn; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_text_rsp *hdr; + + isert_dbg("Cmd %p\n", isert_cmd); + + switch (cmd->iscsi_opcode) { + case ISCSI_OP_SCSI_CMD: + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + 
list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + if (cmd->data_direction == DMA_TO_DEVICE) { + iscsit_stop_dataout_timer(cmd); + /* + * Check for special case during comp_err where + * WRITE_PENDING has been handed off from core, + * but requires an extra target_put_sess_cmd() + * before transport_generic_free_cmd() below. + */ + if (comp_err && + cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { + struct se_cmd *se_cmd = &cmd->se_cmd; + + target_put_sess_cmd(se_cmd); + } + } + + isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); + transport_generic_free_cmd(&cmd->se_cmd, 0); + break; + case ISCSI_OP_SCSI_TMFUNC: + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + transport_generic_free_cmd(&cmd->se_cmd, 0); + break; + case ISCSI_OP_REJECT: + case ISCSI_OP_NOOP_OUT: + case ISCSI_OP_TEXT: + hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; + /* If the continue bit is on, keep the command alive */ + if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) + break; + + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + /* + * Handle special case for REJECT when iscsi_add_reject*() has + * overwritten the original iscsi_opcode assignment, and the + * associated cmd->se_cmd needs to be released. + */ + if (cmd->se_cmd.se_tfo != NULL) { + isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", + cmd->iscsi_opcode); + transport_generic_free_cmd(&cmd->se_cmd, 0); + break; + } + /* fall through */ + default: + iscsit_release_cmd(cmd); + break; + } +} + +static void +isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) +{ + if (tx_desc->dma_addr != 0) { + isert_dbg("unmap single for tx_desc->dma_addr\n"); + ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + tx_desc->dma_addr = 0; + } +} + +static void +isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, + struct ib_device *ib_dev, bool comp_err) +{ + if (isert_cmd->pdu_buf_dma != 0) { + isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); + ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, + isert_cmd->pdu_buf_len, DMA_TO_DEVICE); + isert_cmd->pdu_buf_dma = 0; + } + + isert_unmap_tx_desc(tx_desc, ib_dev); + isert_put_cmd(isert_cmd, comp_err); +} + +static int +isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) +{ + struct ib_mr_status mr_status; + int ret; + + ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + isert_err("ib_check_mr_status failed, ret %d\n", ret); + goto fail_mr_status; + } + + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + u64 sec_offset_err; + u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; + + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case IB_SIG_BAD_REFTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case IB_SIG_BAD_APPTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + } + sec_offset_err = mr_status.sig_err.sig_err_offset; + do_div(sec_offset_err, block_size); + se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; + + isert_err("PI error found type %d at sector 0x%llx " + "expected 0x%x vs actual 0x%x\n", + mr_status.sig_err.err_type, + (unsigned long long)se_cmd->bad_sector, + mr_status.sig_err.expected, + 
mr_status.sig_err.actual); + ret = 1; + } + +fail_mr_status: + return ret; +} + +static void +isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct isert_conn *isert_conn = wc->qp->qp_context; + struct isert_device *device = isert_conn->device; + struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); + struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); + struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd; + int ret = 0; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + isert_print_wc(wc, "rdma write"); + if (wc->status != IB_WC_WR_FLUSH_ERR) + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + isert_completion_put(desc, isert_cmd, device->ib_device, true); + return; + } + + isert_dbg("Cmd %p\n", isert_cmd); + + ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); + isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); + + if (ret) { + /* + * transport_generic_request_failure() expects to have + * plus two references to handle queue-full, so re-add + * one here as target-core will have already dropped + * it after the first isert_put_datain() callback. + */ + kref_get(&cmd->cmd_kref); + transport_generic_request_failure(cmd, cmd->pi_err); + } else { + /* + * XXX: isert_put_response() failure is not retried. + */ + ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); + if (ret) + pr_warn_ratelimited("isert_put_response() ret: %d\n", ret); + } +} + +static void +isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct isert_conn *isert_conn = wc->qp->qp_context; + struct isert_device *device = isert_conn->device; + struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); + struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc); + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct se_cmd *se_cmd = &cmd->se_cmd; + int ret = 0; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + isert_print_wc(wc, "rdma read"); + if (wc->status != IB_WC_WR_FLUSH_ERR) + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + isert_completion_put(desc, isert_cmd, device->ib_device, true); + return; + } + + isert_dbg("Cmd %p\n", isert_cmd); + + iscsit_stop_dataout_timer(cmd); + + if (isert_prot_cmd(isert_conn, se_cmd)) + ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr); + isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); + cmd->write_data_done = 0; + + isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + + /* + * transport_generic_request_failure() will drop the extra + * se_cmd->cmd_kref reference after T10-PI error, and handle + * any non-zero ->queue_status() callback error retries. 
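+	 * Note that, unlike isert_rdma_write_done() above, this path does not
+	 * take an extra kref_get() before reporting the failure.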
+ */ + if (ret) + transport_generic_request_failure(se_cmd, se_cmd->pi_err); + else + target_execute_cmd(se_cmd); +} + +static void +isert_do_control_comp(struct work_struct *work) +{ + struct isert_cmd *isert_cmd = container_of(work, + struct isert_cmd, comp_work); + struct isert_conn *isert_conn = isert_cmd->conn; + struct ib_device *ib_dev = isert_conn->cm_id->device; + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + + isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); + + switch (cmd->i_state) { + case ISTATE_SEND_TASKMGTRSP: + iscsit_tmr_post_handler(cmd, cmd->conn); + /* fall through */ + case ISTATE_SEND_REJECT: + case ISTATE_SEND_TEXTRSP: + cmd->i_state = ISTATE_SENT_STATUS; + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, + ib_dev, false); + break; + case ISTATE_SEND_LOGOUTRSP: + iscsit_logout_post_handler(cmd, cmd->conn); + break; + default: + isert_err("Unknown i_state %d\n", cmd->i_state); + dump_stack(); + break; + } +} + +static void +isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct isert_conn *isert_conn = wc->qp->qp_context; + struct ib_device *ib_dev = isert_conn->cm_id->device; + struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + isert_print_wc(wc, "login send"); + if (wc->status != IB_WC_WR_FLUSH_ERR) + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + } + + isert_unmap_tx_desc(tx_desc, ib_dev); +} + +static void +isert_send_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct isert_conn *isert_conn = wc->qp->qp_context; + struct ib_device *ib_dev = isert_conn->cm_id->device; + struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); + struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc); + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + isert_print_wc(wc, "send"); + if (wc->status != IB_WC_WR_FLUSH_ERR) + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + isert_completion_put(tx_desc, isert_cmd, ib_dev, true); + return; + } + + isert_dbg("Cmd %p\n", isert_cmd); + + switch (isert_cmd->iscsi_cmd->i_state) { + case ISTATE_SEND_TASKMGTRSP: + case ISTATE_SEND_LOGOUTRSP: + case ISTATE_SEND_REJECT: + case ISTATE_SEND_TEXTRSP: + isert_unmap_tx_desc(tx_desc, ib_dev); + + INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); + queue_work(isert_comp_wq, &isert_cmd->comp_work); + return; + default: + isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS; + isert_completion_put(tx_desc, isert_cmd, ib_dev, false); + break; + } +} + +static int +isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) +{ + int ret; + + ret = isert_post_recv(isert_conn, isert_cmd->rx_desc); + if (ret) { + isert_err("ib_post_recv failed with %d\n", ret); + return ret; + } + + ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL); + if (ret) { + isert_err("ib_post_send failed with %d\n", ret); + return ret; + } + return ret; +} + +static int +isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) + &isert_cmd->tx_desc.iscsi_header; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_rsp_pdu(cmd, conn, true, hdr); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + /* + * Attach SENSE DATA payload to iSCSI Response PDU + */ + if (cmd->se_cmd.sense_buffer && + 
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || + (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; + u32 padding, pdu_len; + + put_unaligned_be16(cmd->se_cmd.scsi_sense_length, + cmd->sense_buffer); + cmd->se_cmd.scsi_sense_length += sizeof(__be16); + + padding = -(cmd->se_cmd.scsi_sense_length) & 3; + hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); + pdu_len = cmd->se_cmd.scsi_sense_length + padding; + + isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, + (void *)cmd->sense_buffer, pdu_len, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) + return -ENOMEM; + + isert_cmd->pdu_buf_len = pdu_len; + tx_dsg->addr = isert_cmd->pdu_buf_dma; + tx_dsg->length = pdu_len; + tx_dsg->lkey = device->pd->local_dma_lkey; + isert_cmd->tx_desc.num_sge = 2; + } + + isert_init_send_wr(isert_conn, isert_cmd, send_wr); + + isert_dbg("Posting SCSI Response\n"); + + return isert_post_response(isert_conn, isert_cmd); +} + +static void +isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); + isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); +} + +static enum target_prot_op +isert_get_sup_prot_ops(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = conn->context; + struct isert_device *device = isert_conn->device; + + if (conn->tpg->tpg_attrib.t10_pi) { + if (device->pi_capable) { + isert_info("conn %p PI offload enabled\n", isert_conn); + isert_conn->pi_support = true; + return TARGET_PROT_ALL; + } + } + + isert_info("conn %p PI offload disabled\n", isert_conn); + isert_conn->pi_support = false; + + return TARGET_PROT_NORMAL; +} + +static int +isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + bool nopout_response) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *) + &isert_cmd->tx_desc.iscsi_header, + nopout_response); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); + + isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); + + return isert_post_response(isert_conn, isert_cmd); +} + +static int +isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) + &isert_cmd->tx_desc.iscsi_header); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); + + isert_dbg("conn %p Posting Logout Response\n", isert_conn); + + return isert_post_response(isert_conn, isert_cmd); +} + +static int +isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct isert_cmd *isert_cmd = 
iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) + &isert_cmd->tx_desc.iscsi_header); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); + + isert_dbg("conn %p Posting Task Management Response\n", isert_conn); + + return isert_post_response(isert_conn, isert_cmd); +} + +static int +isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; + struct iscsi_reject *hdr = + (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_reject(cmd, conn, hdr); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + + hton24(hdr->dlength, ISCSI_HDR_LEN); + isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, + (void *)cmd->buf_ptr, ISCSI_HDR_LEN, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) + return -ENOMEM; + isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; + tx_dsg->addr = isert_cmd->pdu_buf_dma; + tx_dsg->length = ISCSI_HDR_LEN; + tx_dsg->lkey = device->pd->local_dma_lkey; + isert_cmd->tx_desc.num_sge = 2; + + isert_init_send_wr(isert_conn, isert_cmd, send_wr); + + isert_dbg("conn %p Posting Reject\n", isert_conn); + + return isert_post_response(isert_conn, isert_cmd); +} + +static int +isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + struct iscsi_text_rsp *hdr = + (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; + u32 txt_rsp_len; + int rc; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); + if (rc < 0) + return rc; + + txt_rsp_len = rc; + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + + if (txt_rsp_len) { + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; + void *txt_rsp_buf = cmd->buf_ptr; + + isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, + txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); + if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) + return -ENOMEM; + + isert_cmd->pdu_buf_len = txt_rsp_len; + tx_dsg->addr = isert_cmd->pdu_buf_dma; + tx_dsg->length = txt_rsp_len; + tx_dsg->lkey = device->pd->local_dma_lkey; + isert_cmd->tx_desc.num_sge = 2; + } + isert_init_send_wr(isert_conn, isert_cmd, send_wr); + + isert_dbg("conn %p Text Response\n", isert_conn); + + return isert_post_response(isert_conn, isert_cmd); +} + +static inline void +isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs, + struct ib_sig_domain *domain) +{ + domain->sig_type = IB_SIG_TYPE_T10_DIF; + domain->sig.dif.bg_type = IB_T10DIF_CRC; + domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size; + domain->sig.dif.ref_tag = se_cmd->reftag_seed; + /* + * At the moment we hard code those, but if in the 
future + * the target core would like to use it, we will take it + * from se_cmd. + */ + domain->sig.dif.apptag_check_mask = 0xffff; + domain->sig.dif.app_escape = true; + domain->sig.dif.ref_escape = true; + if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT || + se_cmd->prot_type == TARGET_DIF_TYPE2_PROT) + domain->sig.dif.ref_remap = true; +}; + +static int +isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) +{ + memset(sig_attrs, 0, sizeof(*sig_attrs)); + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; + isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); + break; + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; + isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); + break; + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire); + isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); + break; + default: + isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); + return -EINVAL; + } + + if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD) + sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; + if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG) + sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; + if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG) + sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; + + return 0; +} + +static int +isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, + struct ib_cqe *cqe, struct ib_send_wr *chain_wr) +{ + struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd; + enum dma_data_direction dir = target_reverse_dma_direction(se_cmd); + u8 port_num = conn->cm_id->port_num; + u64 addr; + u32 rkey, offset; + int ret; + + if (cmd->ctx_init_done) + goto rdma_ctx_post; + + if (dir == DMA_FROM_DEVICE) { + addr = cmd->write_va; + rkey = cmd->write_stag; + offset = cmd->iscsi_cmd->write_data_done; + } else { + addr = cmd->read_va; + rkey = cmd->read_stag; + offset = 0; + } + + if (isert_prot_cmd(conn, se_cmd)) { + struct ib_sig_attrs sig_attrs; + + ret = isert_set_sig_attrs(se_cmd, &sig_attrs); + if (ret) + return ret; + + WARN_ON_ONCE(offset); + ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num, + se_cmd->t_data_sg, se_cmd->t_data_nents, + se_cmd->t_prot_sg, se_cmd->t_prot_nents, + &sig_attrs, addr, rkey, dir); + } else { + ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num, + se_cmd->t_data_sg, se_cmd->t_data_nents, + offset, addr, rkey, dir); + } + + if (ret < 0) { + isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); + return ret; + } + + cmd->ctx_init_done = true; + +rdma_ctx_post: + ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); + if (ret < 0) + isert_err("Cmd: %p failed to post RDMA res\n", cmd); + return ret; +} + +static int +isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = conn->context; + struct ib_cqe *cqe = NULL; + struct ib_send_wr *chain_wr = NULL; + int rc; + + isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", + isert_cmd, se_cmd->data_length); + + if (isert_prot_cmd(isert_conn, se_cmd)) { + isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done; + cqe = &isert_cmd->tx_desc.tx_cqe; + } else { + /* + * Build isert_conn->tx_desc for iSCSI response PDU and attach + */ + isert_create_send_desc(isert_conn, isert_cmd, + 
&isert_cmd->tx_desc); + iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) + &isert_cmd->tx_desc.iscsi_header); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + isert_init_send_wr(isert_conn, isert_cmd, + &isert_cmd->tx_desc.send_wr); + + rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); + if (rc) { + isert_err("ib_post_recv failed with %d\n", rc); + return rc; + } + + chain_wr = &isert_cmd->tx_desc.send_wr; + } + + rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); + isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", + isert_cmd, rc); + return rc; +} + +static int +isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + int ret; + + isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", + isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); + + isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; + ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, + &isert_cmd->tx_desc.tx_cqe, NULL); + + isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", + isert_cmd, ret); + return ret; +} + +static int +isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + int ret = 0; + + switch (state) { + case ISTATE_REMOVE: + spin_lock_bh(&conn->cmd_lock); + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + isert_put_cmd(isert_cmd, true); + break; + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + ret = isert_put_nopin(cmd, conn, false); + break; + default: + isert_err("Unknown immediate state: 0x%02x\n", state); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) +{ + struct isert_conn *isert_conn = conn->context; + int ret; + + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + ret = isert_put_logout_rsp(cmd, conn); + if (!ret) + isert_conn->logout_posted = true; + break; + case ISTATE_SEND_NOPIN: + ret = isert_put_nopin(cmd, conn, true); + break; + case ISTATE_SEND_TASKMGTRSP: + ret = isert_put_tm_rsp(cmd, conn); + break; + case ISTATE_SEND_REJECT: + ret = isert_put_reject(cmd, conn); + break; + case ISTATE_SEND_TEXTRSP: + ret = isert_put_text_rsp(cmd, conn); + break; + case ISTATE_SEND_STATUS: + /* + * Special case for sending non GOOD SCSI status from TX thread + * context during pre se_cmd excecution failure. 
+ */ + ret = isert_put_response(conn, cmd); + break; + default: + isert_err("Unknown response state: 0x%02x\n", state); + ret = -EINVAL; + break; + } + + return ret; +} + +struct rdma_cm_id * +isert_setup_id(struct isert_np *isert_np) +{ + struct iscsi_np *np = isert_np->np; + struct rdma_cm_id *id; + struct sockaddr *sa; + int ret; + + sa = (struct sockaddr *)&np->np_sockaddr; + isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); + + id = rdma_create_id(&init_net, isert_cma_handler, isert_np, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(id)) { + isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); + ret = PTR_ERR(id); + goto out; + } + isert_dbg("id %p context %p\n", id, id->context); + + ret = rdma_bind_addr(id, sa); + if (ret) { + isert_err("rdma_bind_addr() failed: %d\n", ret); + goto out_id; + } + + ret = rdma_listen(id, 0); + if (ret) { + isert_err("rdma_listen() failed: %d\n", ret); + goto out_id; + } + + return id; +out_id: + rdma_destroy_id(id); +out: + return ERR_PTR(ret); +} + +static int +isert_setup_np(struct iscsi_np *np, + struct sockaddr_storage *ksockaddr) +{ + struct isert_np *isert_np; + struct rdma_cm_id *isert_lid; + int ret; + + isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); + if (!isert_np) + return -ENOMEM; + + sema_init(&isert_np->sem, 0); + mutex_init(&isert_np->mutex); + INIT_LIST_HEAD(&isert_np->accepted); + INIT_LIST_HEAD(&isert_np->pending); + isert_np->np = np; + + /* + * Setup the np->np_sockaddr from the passed sockaddr setup + * in iscsi_target_configfs.c code.. + */ + memcpy(&np->np_sockaddr, ksockaddr, + sizeof(struct sockaddr_storage)); + + isert_lid = isert_setup_id(isert_np); + if (IS_ERR(isert_lid)) { + ret = PTR_ERR(isert_lid); + goto out; + } + + isert_np->cm_id = isert_lid; + np->np_context = isert_np; + + return 0; + +out: + kfree(isert_np); + + return ret; +} + +static int +isert_rdma_accept(struct isert_conn *isert_conn) +{ + struct rdma_cm_id *cm_id = isert_conn->cm_id; + struct rdma_conn_param cp; + int ret; + struct iser_cm_hdr rsp_hdr; + + memset(&cp, 0, sizeof(struct rdma_conn_param)); + cp.initiator_depth = isert_conn->initiator_depth; + cp.retry_count = 7; + cp.rnr_retry_count = 7; + + memset(&rsp_hdr, 0, sizeof(rsp_hdr)); + rsp_hdr.flags = ISERT_ZBVA_NOT_USED; + if (!isert_conn->snd_w_inv) + rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED; + cp.private_data = (void *)&rsp_hdr; + cp.private_data_len = sizeof(rsp_hdr); + + ret = rdma_accept(cm_id, &cp); + if (ret) { + isert_err("rdma_accept() failed with: %d\n", ret); + return ret; + } + + return 0; +} + +static int +isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) +{ + struct isert_conn *isert_conn = conn->context; + int ret; + + isert_info("before login_req comp conn: %p\n", isert_conn); + ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); + if (ret) { + isert_err("isert_conn %p interrupted before got login req\n", + isert_conn); + return ret; + } + reinit_completion(&isert_conn->login_req_comp); + + /* + * For login requests after the first PDU, isert_rx_login_req() will + * kick schedule_delayed_work(&conn->login_work) as the packet is + * received, which turns this callback from iscsi_target_do_login_rx() + * into a NOP. 
+ */ + if (!login->first_request) + return 0; + + isert_rx_login_req(isert_conn); + + isert_info("before login_comp conn: %p\n", conn); + ret = wait_for_completion_interruptible(&isert_conn->login_comp); + if (ret) + return ret; + + isert_info("processing login->req: %p\n", login->req); + + return 0; +} + +static void +isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, + struct isert_conn *isert_conn) +{ + struct rdma_cm_id *cm_id = isert_conn->cm_id; + struct rdma_route *cm_route = &cm_id->route; + + conn->login_family = np->np_sockaddr.ss_family; + + conn->login_sockaddr = cm_route->addr.dst_addr; + conn->local_sockaddr = cm_route->addr.src_addr; +} + +static int +isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) +{ + struct isert_np *isert_np = np->np_context; + struct isert_conn *isert_conn; + int ret; + +accept_wait: + ret = down_interruptible(&isert_np->sem); + if (ret) + return -ENODEV; + + spin_lock_bh(&np->np_thread_lock); + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { + spin_unlock_bh(&np->np_thread_lock); + isert_dbg("np_thread_state %d\n", + np->np_thread_state); + /** + * No point in stalling here when np_thread + * is in state RESET/SHUTDOWN/EXIT - bail + **/ + return -ENODEV; + } + spin_unlock_bh(&np->np_thread_lock); + + mutex_lock(&isert_np->mutex); + if (list_empty(&isert_np->pending)) { + mutex_unlock(&isert_np->mutex); + goto accept_wait; + } + isert_conn = list_first_entry(&isert_np->pending, + struct isert_conn, node); + list_del_init(&isert_conn->node); + mutex_unlock(&isert_np->mutex); + + conn->context = isert_conn; + isert_conn->conn = conn; + isert_conn->state = ISER_CONN_BOUND; + + isert_set_conn_info(np, conn, isert_conn); + + isert_dbg("Processing isert_conn: %p\n", isert_conn); + + return 0; +} + +static void +isert_free_np(struct iscsi_np *np) +{ + struct isert_np *isert_np = np->np_context; + struct isert_conn *isert_conn, *n; + + if (isert_np->cm_id) + rdma_destroy_id(isert_np->cm_id); + + /* + * FIXME: At this point we don't have a good way to insure + * that at this point we don't have hanging connections that + * completed RDMA establishment but didn't start iscsi login + * process. So work-around this by cleaning up what ever piled + * up in accepted and pending lists. 
+ */ + mutex_lock(&isert_np->mutex); + if (!list_empty(&isert_np->pending)) { + isert_info("Still have isert pending connections\n"); + list_for_each_entry_safe(isert_conn, n, + &isert_np->pending, + node) { + isert_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); + isert_connect_release(isert_conn); + } + } + + if (!list_empty(&isert_np->accepted)) { + isert_info("Still have isert accepted connections\n"); + list_for_each_entry_safe(isert_conn, n, + &isert_np->accepted, + node) { + isert_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); + isert_connect_release(isert_conn); + } + } + mutex_unlock(&isert_np->mutex); + + np->np_context = NULL; + kfree(isert_np); +} + +static void isert_release_work(struct work_struct *work) +{ + struct isert_conn *isert_conn = container_of(work, + struct isert_conn, + release_work); + + isert_info("Starting release conn %p\n", isert_conn); + + mutex_lock(&isert_conn->mutex); + isert_conn->state = ISER_CONN_DOWN; + mutex_unlock(&isert_conn->mutex); + + isert_info("Destroying conn %p\n", isert_conn); + isert_put_conn(isert_conn); +} + +static void +isert_wait4logout(struct isert_conn *isert_conn) +{ + struct iscsi_conn *conn = isert_conn->conn; + + isert_info("conn %p\n", isert_conn); + + if (isert_conn->logout_posted) { + isert_info("conn %p wait for conn_logout_comp\n", isert_conn); + wait_for_completion_timeout(&conn->conn_logout_comp, + SECONDS_FOR_LOGOUT_COMP * HZ); + } +} + +static void +isert_wait4cmds(struct iscsi_conn *conn) +{ + isert_info("iscsi_conn %p\n", conn); + + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); + } +} + +/** + * isert_put_unsol_pending_cmds() - Drop commands waiting for + * unsolicitate dataout + * @conn: iscsi connection + * + * We might still have commands that are waiting for unsolicited + * dataouts messages. 
We must put the extra reference on those + * before blocking on the target_wait_for_session_cmds + */ +static void +isert_put_unsol_pending_cmds(struct iscsi_conn *conn) +{ + struct iscsi_cmd *cmd, *tmp; + static LIST_HEAD(drop_cmd_list); + + spin_lock_bh(&conn->cmd_lock); + list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) { + if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) && + (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) && + (cmd->write_data_done < cmd->se_cmd.data_length)) + list_move_tail(&cmd->i_conn_node, &drop_cmd_list); + } + spin_unlock_bh(&conn->cmd_lock); + + list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) { + list_del_init(&cmd->i_conn_node); + if (cmd->i_state != ISTATE_REMOVE) { + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + + isert_info("conn %p dropping cmd %p\n", conn, cmd); + isert_put_cmd(isert_cmd, true); + } + } +} + +static void isert_wait_conn(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = conn->context; + + isert_info("Starting conn %p\n", isert_conn); + + mutex_lock(&isert_conn->mutex); + isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->mutex); + + ib_drain_qp(isert_conn->qp); + isert_put_unsol_pending_cmds(conn); + isert_wait4cmds(conn); + isert_wait4logout(isert_conn); + + queue_work(isert_release_wq, &isert_conn->release_work); +} + +static void isert_free_conn(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = conn->context; + + ib_drain_qp(isert_conn->qp); + isert_put_conn(isert_conn); +} + +static void isert_get_rx_pdu(struct iscsi_conn *conn) +{ + struct completion comp; + + init_completion(&comp); + + wait_for_completion_interruptible(&comp); +} + +static struct iscsit_transport iser_target_transport = { + .name = "IB/iSER", + .transport_type = ISCSI_INFINIBAND, + .rdma_shutdown = true, + .priv_size = sizeof(struct isert_cmd), + .owner = THIS_MODULE, + .iscsit_setup_np = isert_setup_np, + .iscsit_accept_np = isert_accept_np, + .iscsit_free_np = isert_free_np, + .iscsit_wait_conn = isert_wait_conn, + .iscsit_free_conn = isert_free_conn, + .iscsit_get_login_rx = isert_get_login_rx, + .iscsit_put_login_tx = isert_put_login_tx, + .iscsit_immediate_queue = isert_immediate_queue, + .iscsit_response_queue = isert_response_queue, + .iscsit_get_dataout = isert_get_dataout, + .iscsit_queue_data_in = isert_put_datain, + .iscsit_queue_status = isert_put_response, + .iscsit_aborted_task = isert_aborted_task, + .iscsit_get_rx_pdu = isert_get_rx_pdu, + .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, +}; + +static int __init isert_init(void) +{ + int ret; + + isert_comp_wq = alloc_workqueue("isert_comp_wq", + WQ_UNBOUND | WQ_HIGHPRI, 0); + if (!isert_comp_wq) { + isert_err("Unable to allocate isert_comp_wq\n"); + return -ENOMEM; + } + + isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); + if (!isert_release_wq) { + isert_err("Unable to allocate isert_release_wq\n"); + ret = -ENOMEM; + goto destroy_comp_wq; + } + + iscsit_register_transport(&iser_target_transport); + isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); + + return 0; + +destroy_comp_wq: + destroy_workqueue(isert_comp_wq); + + return ret; +} + +static void __exit isert_exit(void) +{ + flush_scheduled_work(); + destroy_workqueue(isert_release_wq); + destroy_workqueue(isert_comp_wq); + iscsit_unregister_transport(&iser_target_transport); + isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); +} + +MODULE_DESCRIPTION("iSER-Target for 
mainline target infrastructure"); +MODULE_AUTHOR("nab@Linux-iSCSI.org"); +MODULE_LICENSE("GPL"); + +module_init(isert_init); +module_exit(isert_exit); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h new file mode 100644 index 000000000..3b296bac4 --- /dev/null +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> +#include <rdma/rw.h> +#include <scsi/iser.h> + + +#define DRV_NAME "isert" +#define PFX DRV_NAME ": " + +#define isert_dbg(fmt, arg...) \ + do { \ + if (unlikely(isert_debug_level > 2)) \ + printk(KERN_DEBUG PFX "%s: " fmt,\ + __func__ , ## arg); \ + } while (0) + +#define isert_warn(fmt, arg...) \ + do { \ + if (unlikely(isert_debug_level > 0)) \ + pr_warn(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define isert_info(fmt, arg...) \ + do { \ + if (unlikely(isert_debug_level > 1)) \ + pr_info(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define isert_err(fmt, arg...) \ + pr_err(PFX "%s: " fmt, __func__ , ## arg) + +/* Constant PDU lengths calculations */ +#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \ + sizeof(struct iscsi_hdr)) +#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN) + +/* QP settings */ +/* Maximal bounds on received asynchronous PDUs */ +#define ISERT_MAX_TX_MISC_PDUS 4 /* NOOP_IN(2) , ASYNC_EVENT(2) */ + +#define ISERT_MAX_RX_MISC_PDUS 6 /* + * NOOP_OUT(2), TEXT(1), + * SCSI_TMFUNC(2), LOGOUT(1) + */ + +#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */ + +#define ISERT_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX) + +#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2) + +#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \ + ISERT_MAX_TX_MISC_PDUS + \ + ISERT_MAX_RX_MISC_PDUS) + +#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ + (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ + sizeof(struct ib_cqe) + sizeof(bool))) + +#define ISCSI_ISER_SG_TABLESIZE 256 + +enum isert_desc_type { + ISCSI_TX_CONTROL, + ISCSI_TX_DATAIN +}; + +enum iser_conn_state { + ISER_CONN_INIT, + ISER_CONN_UP, + ISER_CONN_BOUND, + ISER_CONN_FULL_FEATURE, + ISER_CONN_TERMINATING, + ISER_CONN_DOWN, +}; + +struct iser_rx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; + char data[ISCSI_DEF_MAX_RECV_SEG_LEN]; + u64 dma_addr; + struct ib_sge rx_sg; + struct ib_cqe rx_cqe; + bool in_use; + char pad[ISER_RX_PAD_SIZE]; +} __packed; + +static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_rx_desc, rx_cqe); +} + +struct iser_tx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; + enum isert_desc_type type; + u64 dma_addr; + struct ib_sge tx_sg[2]; + struct ib_cqe tx_cqe; + int num_sge; + struct ib_send_wr send_wr; +} __packed; + +static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe) +{ + return container_of(cqe, struct iser_tx_desc, tx_cqe); +} + +struct isert_cmd { + uint32_t read_stag; + uint32_t write_stag; + uint64_t read_va; + uint64_t write_va; + uint32_t inv_rkey; + u64 pdu_buf_dma; + u32 pdu_buf_len; + struct isert_conn *conn; + struct iscsi_cmd *iscsi_cmd; + struct iser_tx_desc tx_desc; + struct iser_rx_desc *rx_desc; + struct rdma_rw_ctx rw; + struct work_struct comp_work; + struct scatterlist sg; + bool ctx_init_done; +}; + 
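+/*
+ * Send completions recover the command from the work completion by walking
+ * container_of() twice: wc->wr_cqe is embedded in struct iser_tx_desc
+ * (cqe_to_tx_desc() above), which in turn is embedded in struct isert_cmd
+ * (tx_desc_to_cmd() below).
+ */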
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc) +{ + return container_of(desc, struct isert_cmd, tx_desc); +} + +struct isert_device; + +struct isert_conn { + enum iser_conn_state state; + u32 responder_resources; + u32 initiator_depth; + bool pi_support; + struct iser_rx_desc *login_req_buf; + char *login_rsp_buf; + u64 login_req_dma; + int login_req_len; + u64 login_rsp_dma; + struct iser_rx_desc *rx_descs; + struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS]; + struct iscsi_conn *conn; + struct list_head node; + struct completion login_comp; + struct completion login_req_comp; + struct iser_tx_desc login_tx_desc; + struct rdma_cm_id *cm_id; + struct ib_qp *qp; + struct isert_device *device; + struct mutex mutex; + struct kref kref; + struct work_struct release_work; + bool logout_posted; + bool snd_w_inv; + wait_queue_head_t rem_wait; + bool dev_removed; +}; + +#define ISERT_MAX_CQ 64 + +/** + * struct isert_comp - iSER completion context + * + * @device: pointer to device handle + * @cq: completion queue + * @active_qps: Number of active QPs attached + * to completion context + */ +struct isert_comp { + struct isert_device *device; + struct ib_cq *cq; + int active_qps; +}; + +struct isert_device { + bool pi_capable; + int refcount; + struct ib_device *ib_device; + struct ib_pd *pd; + struct isert_comp *comps; + int comps_used; + struct list_head dev_node; +}; + +struct isert_np { + struct iscsi_np *np; + struct semaphore sem; + struct rdma_cm_id *cm_id; + struct mutex mutex; + struct list_head accepted; + struct list_head pending; +}; diff --git a/drivers/infiniband/ulp/opa_vnic/Kconfig b/drivers/infiniband/ulp/opa_vnic/Kconfig new file mode 100644 index 000000000..48132ab5e --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/Kconfig @@ -0,0 +1,8 @@ +config INFINIBAND_OPA_VNIC + tristate "Intel OPA VNIC support" + depends on X86_64 && INFINIBAND + ---help--- + This is Omni-Path (OPA) Virtual Network Interface Controller (VNIC) + driver for Ethernet over Omni-Path feature. It implements the HW + independent VNIC functionality. It interfaces with Linux stack for + data path and IB MAD for the control path. diff --git a/drivers/infiniband/ulp/opa_vnic/Makefile b/drivers/infiniband/ulp/opa_vnic/Makefile new file mode 100644 index 000000000..8061b287c --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/Makefile @@ -0,0 +1,7 @@ +# Makefile - Intel Omni-Path Virtual Network Controller driver +# Copyright(c) 2017, Intel Corporation. +# +obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic.o + +opa_vnic-y := opa_vnic_netdev.o opa_vnic_encap.o opa_vnic_ethtool.o \ + opa_vnic_vema.o opa_vnic_vema_iface.o diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c new file mode 100644 index 000000000..31cd36141 --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -0,0 +1,513 @@ +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * This file contains OPA VNIC encapsulation/decapsulation function. + */ + +#include <linux/if_ether.h> +#include <linux/if_vlan.h> + +#include "opa_vnic_internal.h" + +/* OPA 16B Header fields */ +#define OPA_16B_LID_MASK 0xFFFFFull +#define OPA_16B_SLID_HIGH_SHFT 8 +#define OPA_16B_SLID_MASK 0xF00ull +#define OPA_16B_DLID_MASK 0xF000ull +#define OPA_16B_DLID_HIGH_SHFT 12 +#define OPA_16B_LEN_SHFT 20 +#define OPA_16B_SC_SHFT 20 +#define OPA_16B_RC_SHFT 25 +#define OPA_16B_PKEY_SHFT 16 + +#define OPA_VNIC_L4_HDR_SHFT 16 + +/* L2+L4 hdr len is 20 bytes (5 quad words) */ +#define OPA_VNIC_HDR_QW_LEN 5 + +static inline void opa_vnic_make_header(u8 *hdr, u32 slid, u32 dlid, u16 len, + u16 pkey, u16 entropy, u8 sc, u8 rc, + u8 l4_type, u16 l4_hdr) +{ + /* h[1]: LT=1, 16B L2=10 */ + u32 h[OPA_VNIC_HDR_QW_LEN] = {0, 0xc0000000, 0, 0, 0}; + + h[2] = l4_type; + h[3] = entropy; + h[4] = l4_hdr << OPA_VNIC_L4_HDR_SHFT; + + /* Extract and set 4 upper bits and 20 lower bits of the lids */ + h[0] |= (slid & OPA_16B_LID_MASK); + h[2] |= ((slid >> (20 - OPA_16B_SLID_HIGH_SHFT)) & OPA_16B_SLID_MASK); + + h[1] |= (dlid & OPA_16B_LID_MASK); + h[2] |= ((dlid >> (20 - OPA_16B_DLID_HIGH_SHFT)) & OPA_16B_DLID_MASK); + + h[0] |= (len << OPA_16B_LEN_SHFT); + h[1] |= (rc << OPA_16B_RC_SHFT); + h[1] |= (sc << OPA_16B_SC_SHFT); + h[2] |= ((u32)pkey << OPA_16B_PKEY_SHFT); + + memcpy(hdr, h, OPA_VNIC_HDR_LEN); +} + +/* + * Using a simple hash table for mac table implementation with the last octet + * of mac address as a key. 
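+ * Entries are hashed on mac_addr[OPA_VNIC_MAC_HASH_IDX] and lookups walk the
+ * matching bucket with vnic_hash_for_each_possible().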
+ */ +static void opa_vnic_free_mac_tbl(struct hlist_head *mactbl) +{ + struct opa_vnic_mac_tbl_node *node; + struct hlist_node *tmp; + int bkt; + + if (!mactbl) + return; + + vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) { + hash_del(&node->hlist); + kfree(node); + } + kfree(mactbl); +} + +static struct hlist_head *opa_vnic_alloc_mac_tbl(void) +{ + u32 size = sizeof(struct hlist_head) * OPA_VNIC_MAC_TBL_SIZE; + struct hlist_head *mactbl; + + mactbl = kzalloc(size, GFP_KERNEL); + if (!mactbl) + return ERR_PTR(-ENOMEM); + + vnic_hash_init(mactbl); + return mactbl; +} + +/* opa_vnic_release_mac_tbl - empty and free the mac table */ +void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter) +{ + struct hlist_head *mactbl; + + mutex_lock(&adapter->mactbl_lock); + mactbl = rcu_access_pointer(adapter->mactbl); + rcu_assign_pointer(adapter->mactbl, NULL); + synchronize_rcu(); + opa_vnic_free_mac_tbl(mactbl); + adapter->info.vport.mac_tbl_digest = 0; + mutex_unlock(&adapter->mactbl_lock); +} + +/* + * opa_vnic_query_mac_tbl - query the mac table for a section + * + * This function implements query of specific function of the mac table. + * The function also expects the requested range to be valid. + */ +void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter, + struct opa_veswport_mactable *tbl) +{ + struct opa_vnic_mac_tbl_node *node; + struct hlist_head *mactbl; + int bkt; + u16 loffset, lnum_entries; + + rcu_read_lock(); + mactbl = rcu_dereference(adapter->mactbl); + if (!mactbl) + goto get_mac_done; + + loffset = be16_to_cpu(tbl->offset); + lnum_entries = be16_to_cpu(tbl->num_entries); + + vnic_hash_for_each(mactbl, bkt, node, hlist) { + struct __opa_vnic_mactable_entry *nentry = &node->entry; + struct opa_veswport_mactable_entry *entry; + + if ((node->index < loffset) || + (node->index >= (loffset + lnum_entries))) + continue; + + /* populate entry in the tbl corresponding to the index */ + entry = &tbl->tbl_entries[node->index - loffset]; + memcpy(entry->mac_addr, nentry->mac_addr, + ARRAY_SIZE(entry->mac_addr)); + memcpy(entry->mac_addr_mask, nentry->mac_addr_mask, + ARRAY_SIZE(entry->mac_addr_mask)); + entry->dlid_sd = cpu_to_be32(nentry->dlid_sd); + } + tbl->mac_tbl_digest = cpu_to_be32(adapter->info.vport.mac_tbl_digest); +get_mac_done: + rcu_read_unlock(); +} + +/* + * opa_vnic_update_mac_tbl - update mac table section + * + * This function updates the specified section of the mac table. + * The procedure includes following steps. + * - Allocate a new mac (hash) table. + * - Add the specified entries to the new table. + * (except the ones that are requested to be deleted). + * - Add all the other entries from the old mac table. + * - If there is a failure, free the new table and return. + * - Switch to the new table. + * - Free the old table and return. + * + * The function also expects the requested range to be valid. 
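+ *
+ * The table switch is serialized by adapter->mactbl_lock and uses
+ * synchronize_rcu(), so this function sleeps and must not be called from
+ * atomic context.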
+ */ +int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter, + struct opa_veswport_mactable *tbl) +{ + struct opa_vnic_mac_tbl_node *node, *new_node; + struct hlist_head *new_mactbl, *old_mactbl; + int i, bkt, rc = 0; + u8 key; + u16 loffset, lnum_entries; + + mutex_lock(&adapter->mactbl_lock); + /* allocate new mac table */ + new_mactbl = opa_vnic_alloc_mac_tbl(); + if (IS_ERR(new_mactbl)) { + mutex_unlock(&adapter->mactbl_lock); + return PTR_ERR(new_mactbl); + } + + loffset = be16_to_cpu(tbl->offset); + lnum_entries = be16_to_cpu(tbl->num_entries); + + /* add updated entries to the new mac table */ + for (i = 0; i < lnum_entries; i++) { + struct __opa_vnic_mactable_entry *nentry; + struct opa_veswport_mactable_entry *entry = + &tbl->tbl_entries[i]; + u8 *mac_addr = entry->mac_addr; + u8 empty_mac[ETH_ALEN] = { 0 }; + + v_dbg("new mac entry %4d: %02x:%02x:%02x:%02x:%02x:%02x %x\n", + loffset + i, mac_addr[0], mac_addr[1], mac_addr[2], + mac_addr[3], mac_addr[4], mac_addr[5], + entry->dlid_sd); + + /* if the entry is being removed, do not add it */ + if (!memcmp(mac_addr, empty_mac, ARRAY_SIZE(empty_mac))) + continue; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + rc = -ENOMEM; + goto updt_done; + } + + node->index = loffset + i; + nentry = &node->entry; + memcpy(nentry->mac_addr, entry->mac_addr, + ARRAY_SIZE(nentry->mac_addr)); + memcpy(nentry->mac_addr_mask, entry->mac_addr_mask, + ARRAY_SIZE(nentry->mac_addr_mask)); + nentry->dlid_sd = be32_to_cpu(entry->dlid_sd); + key = node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX]; + vnic_hash_add(new_mactbl, &node->hlist, key); + } + + /* add other entries from current mac table to new mac table */ + old_mactbl = rcu_access_pointer(adapter->mactbl); + if (!old_mactbl) + goto switch_tbl; + + vnic_hash_for_each(old_mactbl, bkt, node, hlist) { + if ((node->index >= loffset) && + (node->index < (loffset + lnum_entries))) + continue; + + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) { + rc = -ENOMEM; + goto updt_done; + } + + new_node->index = node->index; + memcpy(&new_node->entry, &node->entry, sizeof(node->entry)); + key = new_node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX]; + vnic_hash_add(new_mactbl, &new_node->hlist, key); + } + +switch_tbl: + /* switch to new table */ + rcu_assign_pointer(adapter->mactbl, new_mactbl); + synchronize_rcu(); + + adapter->info.vport.mac_tbl_digest = be32_to_cpu(tbl->mac_tbl_digest); +updt_done: + /* upon failure, free the new table; otherwise, free the old table */ + if (rc) + opa_vnic_free_mac_tbl(new_mactbl); + else + opa_vnic_free_mac_tbl(old_mactbl); + + mutex_unlock(&adapter->mactbl_lock); + return rc; +} + +/* opa_vnic_chk_mac_tbl - check mac table for dlid */ +static uint32_t opa_vnic_chk_mac_tbl(struct opa_vnic_adapter *adapter, + struct ethhdr *mac_hdr) +{ + struct opa_vnic_mac_tbl_node *node; + struct hlist_head *mactbl; + u32 dlid = 0; + u8 key; + + rcu_read_lock(); + mactbl = rcu_dereference(adapter->mactbl); + if (unlikely(!mactbl)) + goto chk_done; + + key = mac_hdr->h_dest[OPA_VNIC_MAC_HASH_IDX]; + vnic_hash_for_each_possible(mactbl, node, hlist, key) { + struct __opa_vnic_mactable_entry *entry = &node->entry; + + /* if related to source mac, skip */ + if (unlikely(OPA_VNIC_DLID_SD_IS_SRC_MAC(entry->dlid_sd))) + continue; + + if (!memcmp(node->entry.mac_addr, mac_hdr->h_dest, + ARRAY_SIZE(node->entry.mac_addr))) { + /* mac address found */ + dlid = OPA_VNIC_DLID_SD_GET_DLID(node->entry.dlid_sd); + break; + } + } + +chk_done: + rcu_read_unlock(); + return dlid; +} 
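+
+/*
+ * DLID selection order on transmit: the per-adapter MAC table is consulted
+ * first; multicast destinations then fall back to the unknown multicast DLID;
+ * locally administered unicast MACs carry the DLID in bytes 3-5 of the
+ * address; anything else uses the default port's unknown unicast DLID.
+ */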
+ +/* opa_vnic_get_dlid - find and return the DLID */ +static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter, + struct sk_buff *skb, u8 def_port) +{ + struct __opa_veswport_info *info = &adapter->info; + struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb); + u32 dlid; + + dlid = opa_vnic_chk_mac_tbl(adapter, mac_hdr); + if (dlid) + return dlid; + + if (is_multicast_ether_addr(mac_hdr->h_dest)) { + dlid = info->vesw.u_mcast_dlid; + } else { + if (is_local_ether_addr(mac_hdr->h_dest)) { + dlid = ((uint32_t)mac_hdr->h_dest[5] << 16) | + ((uint32_t)mac_hdr->h_dest[4] << 8) | + mac_hdr->h_dest[3]; + if (unlikely(!dlid)) + v_warn("Null dlid in MAC address\n"); + } else if (def_port != OPA_VNIC_INVALID_PORT) { + if (def_port < OPA_VESW_MAX_NUM_DEF_PORT) + dlid = info->vesw.u_ucast_dlid[def_port]; + } + } + + return dlid; +} + +/* opa_vnic_get_sc - return the service class */ +static u8 opa_vnic_get_sc(struct __opa_veswport_info *info, + struct sk_buff *skb) +{ + struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb); + u16 vlan_tci; + u8 sc; + + if (!__vlan_get_tag(skb, &vlan_tci)) { + u8 pcp = OPA_VNIC_VLAN_PCP(vlan_tci); + + if (is_multicast_ether_addr(mac_hdr->h_dest)) + sc = info->vport.pcp_to_sc_mc[pcp]; + else + sc = info->vport.pcp_to_sc_uc[pcp]; + } else { + if (is_multicast_ether_addr(mac_hdr->h_dest)) + sc = info->vport.non_vlan_sc_mc; + else + sc = info->vport.non_vlan_sc_uc; + } + + return sc; +} + +u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb) +{ + struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb); + struct __opa_veswport_info *info = &adapter->info; + u8 vl; + + if (skb_vlan_tag_present(skb)) { + u8 pcp = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; + + if (is_multicast_ether_addr(mac_hdr->h_dest)) + vl = info->vport.pcp_to_vl_mc[pcp]; + else + vl = info->vport.pcp_to_vl_uc[pcp]; + } else { + if (is_multicast_ether_addr(mac_hdr->h_dest)) + vl = info->vport.non_vlan_vl_mc; + else + vl = info->vport.non_vlan_vl_uc; + } + + return vl; +} + +/* opa_vnic_get_rc - return the routing control */ +static u8 opa_vnic_get_rc(struct __opa_veswport_info *info, + struct sk_buff *skb) +{ + u8 proto, rout_ctrl; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IPV6): + proto = ipv6_hdr(skb)->nexthdr; + if (proto == IPPROTO_TCP) + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, + IPV6_TCP); + else if (proto == IPPROTO_UDP) + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, + IPV6_UDP); + else + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6); + break; + case htons(ETH_P_IP): + proto = ip_hdr(skb)->protocol; + if (proto == IPPROTO_TCP) + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, + IPV4_TCP); + else if (proto == IPPROTO_UDP) + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, + IPV4_UDP); + else + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4); + break; + default: + rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT); + } + + return rout_ctrl; +} + +/* opa_vnic_calc_entropy - calculate the packet entropy */ +u8 opa_vnic_calc_entropy(struct sk_buff *skb) +{ + u32 hash = skb_get_hash(skb); + + /* store XOR of all bytes in lower 8 bits */ + hash ^= hash >> 8; + hash ^= hash >> 16; + + /* return lower 8 bits as entropy */ + return (u8)(hash & 0xFF); +} + +/* opa_vnic_get_def_port - get default port based on entropy */ +static inline u8 opa_vnic_get_def_port(struct opa_vnic_adapter *adapter, + u8 entropy) +{ + u8 flow_id; + + /* Add the upper and lower 4-bits of entropy to get the flow id */ + flow_id = 
((entropy & 0xf) + (entropy >> 4)); + return adapter->flow_tbl[flow_id & (OPA_VNIC_FLOW_TBL_SIZE - 1)]; +} + +/* Calculate packet length including OPA header, crc and padding */ +static inline int opa_vnic_wire_length(struct sk_buff *skb) +{ + u32 pad_len; + + /* padding for 8 bytes size alignment */ + pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7; + pad_len += OPA_VNIC_ICRC_TAIL_LEN; + + return (skb->len + pad_len) >> 3; +} + +/* opa_vnic_encap_skb - encapsulate skb packet with OPA header and meta data */ +void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb) +{ + struct __opa_veswport_info *info = &adapter->info; + struct opa_vnic_skb_mdata *mdata; + u8 def_port, sc, rc, entropy, *hdr; + u16 len, l4_hdr; + u32 dlid; + + hdr = skb_push(skb, OPA_VNIC_HDR_LEN); + + entropy = opa_vnic_calc_entropy(skb); + def_port = opa_vnic_get_def_port(adapter, entropy); + len = opa_vnic_wire_length(skb); + dlid = opa_vnic_get_dlid(adapter, skb, def_port); + sc = opa_vnic_get_sc(info, skb); + rc = opa_vnic_get_rc(info, skb); + l4_hdr = info->vesw.vesw_id; + + mdata = skb_push(skb, sizeof(*mdata)); + mdata->vl = opa_vnic_get_vl(adapter, skb); + mdata->entropy = entropy; + mdata->flags = 0; + if (unlikely(!dlid)) { + mdata->flags = OPA_VNIC_SKB_MDATA_ENCAP_ERR; + return; + } + + opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len, + info->vesw.pkey, entropy, sc, rc, + OPA_VNIC_L4_ETHR, l4_hdr); +} diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h new file mode 100644 index 000000000..e4c9bf2ef --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h @@ -0,0 +1,501 @@ +#ifndef _OPA_VNIC_ENCAP_H +#define _OPA_VNIC_ENCAP_H +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * This file contains all OPA VNIC declaration required for encapsulation + * and decapsulation of Ethernet packets + */ + +#include <linux/types.h> +#include <rdma/ib_mad.h> + +/* EMA class version */ +#define OPA_EMA_CLASS_VERSION 0x80 + +/* + * Define the Intel vendor management class for OPA + * ETHERNET MANAGEMENT + */ +#define OPA_MGMT_CLASS_INTEL_EMA 0x34 + +/* EM attribute IDs */ +#define OPA_EM_ATTR_CLASS_PORT_INFO 0x0001 +#define OPA_EM_ATTR_VESWPORT_INFO 0x0011 +#define OPA_EM_ATTR_VESWPORT_MAC_ENTRIES 0x0012 +#define OPA_EM_ATTR_IFACE_UCAST_MACS 0x0013 +#define OPA_EM_ATTR_IFACE_MCAST_MACS 0x0014 +#define OPA_EM_ATTR_DELETE_VESW 0x0015 +#define OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS 0x0020 +#define OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS 0x0022 + +/* VNIC configured and operational state values */ +#define OPA_VNIC_STATE_DROP_ALL 0x1 +#define OPA_VNIC_STATE_FORWARDING 0x3 + +#define OPA_VESW_MAX_NUM_DEF_PORT 16 +#define OPA_VNIC_MAX_NUM_PCP 8 + +#define OPA_VNIC_EMA_DATA (OPA_MGMT_MAD_SIZE - IB_MGMT_VENDOR_HDR) + +/* Defines for vendor specific notice(trap) attributes */ +#define OPA_INTEL_EMA_NOTICE_TYPE_INFO 0x04 + +/* INTEL OUI */ +#define INTEL_OUI_1 0x00 +#define INTEL_OUI_2 0x06 +#define INTEL_OUI_3 0x6a + +/* Trap opcodes sent from VNIC */ +#define OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE 0x1 +#define OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE 0x2 +#define OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE 0x3 + +#define OPA_VNIC_DLID_SD_IS_SRC_MAC(dlid_sd) (!!((dlid_sd) & 0x20)) +#define OPA_VNIC_DLID_SD_GET_DLID(dlid_sd) ((dlid_sd) >> 8) + +/* VNIC Ethernet link status */ +#define OPA_VNIC_ETH_LINK_UP 1 +#define OPA_VNIC_ETH_LINK_DOWN 2 + +/* routing control */ +#define OPA_VNIC_ENCAP_RC_DEFAULT 0 +#define OPA_VNIC_ENCAP_RC_IPV4 4 +#define OPA_VNIC_ENCAP_RC_IPV4_UDP 8 +#define OPA_VNIC_ENCAP_RC_IPV4_TCP 12 +#define OPA_VNIC_ENCAP_RC_IPV6 16 +#define OPA_VNIC_ENCAP_RC_IPV6_TCP 20 +#define OPA_VNIC_ENCAP_RC_IPV6_UDP 24 + +#define OPA_VNIC_ENCAP_RC_EXT(w, b) (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7) + +/** + * struct opa_vesw_info - OPA vnic switch information + * @fabric_id: 10-bit fabric id + * @vesw_id: 12-bit virtual ethernet switch id + * @def_port_mask: bitmask of default ports + * @pkey: partition key + * @u_mcast_dlid: unknown multicast dlid + * @u_ucast_dlid: array of unknown unicast dlids + * @rc: routing control + * @eth_mtu: Ethernet MTU + */ +struct opa_vesw_info { + __be16 fabric_id; + __be16 vesw_id; + + u8 rsvd0[6]; + __be16 def_port_mask; + + u8 rsvd1[2]; + __be16 pkey; + + u8 rsvd2[4]; + __be32 u_mcast_dlid; + __be32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT]; + + __be32 rc; + + u8 rsvd3[56]; + __be16 eth_mtu; + u8 rsvd4[2]; +} __packed; + +/** + * struct opa_per_veswport_info - OPA vnic per port information + * @port_num: port number + * @eth_link_status: current ethernet link state + * @base_mac_addr: base mac address + * @config_state: configured port state + * @oper_state: operational port state + * @max_mac_tbl_ent: max 
number of mac table entries + * @max_smac_ent: max smac entries in mac table + * @mac_tbl_digest: mac table digest + * @encap_slid: base slid for the port + * @pcp_to_sc_uc: sc by pcp index for unicast ethernet packets + * @pcp_to_vl_uc: vl by pcp index for unicast ethernet packets + * @pcp_to_sc_mc: sc by pcp index for multicast ethernet packets + * @pcp_to_vl_mc: vl by pcp index for multicast ethernet packets + * @non_vlan_sc_uc: sc for non-vlan unicast ethernet packets + * @non_vlan_vl_uc: vl for non-vlan unicast ethernet packets + * @non_vlan_sc_mc: sc for non-vlan multicast ethernet packets + * @non_vlan_vl_mc: vl for non-vlan multicast ethernet packets + * @uc_macs_gen_count: generation count for unicast macs list + * @mc_macs_gen_count: generation count for multicast macs list + */ +struct opa_per_veswport_info { + __be32 port_num; + + u8 eth_link_status; + u8 rsvd0[3]; + + u8 base_mac_addr[ETH_ALEN]; + u8 config_state; + u8 oper_state; + + __be16 max_mac_tbl_ent; + __be16 max_smac_ent; + __be32 mac_tbl_digest; + u8 rsvd1[4]; + + __be32 encap_slid; + + u8 pcp_to_sc_uc[OPA_VNIC_MAX_NUM_PCP]; + u8 pcp_to_vl_uc[OPA_VNIC_MAX_NUM_PCP]; + u8 pcp_to_sc_mc[OPA_VNIC_MAX_NUM_PCP]; + u8 pcp_to_vl_mc[OPA_VNIC_MAX_NUM_PCP]; + + u8 non_vlan_sc_uc; + u8 non_vlan_vl_uc; + u8 non_vlan_sc_mc; + u8 non_vlan_vl_mc; + + u8 rsvd2[48]; + + __be16 uc_macs_gen_count; + __be16 mc_macs_gen_count; + + u8 rsvd3[8]; +} __packed; + +/** + * struct opa_veswport_info - OPA vnic port information + * @vesw: OPA vnic switch information + * @vport: OPA vnic per port information + * + * On host, each of the virtual ethernet ports belongs + * to a different virtual ethernet switches. + */ +struct opa_veswport_info { + struct opa_vesw_info vesw; + struct opa_per_veswport_info vport; +}; + +/** + * struct opa_veswport_mactable_entry - single entry in the forwarding table + * @mac_addr: MAC address + * @mac_addr_mask: MAC address bit mask + * @dlid_sd: Matching DLID and side data + * + * On the host each virtual ethernet port will have + * a forwarding table. These tables are used to + * map a MAC to a LID and other data. For more + * details see struct opa_veswport_mactable_entries. + * This is the structure of a single mactable entry + */ +struct opa_veswport_mactable_entry { + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; + __be32 dlid_sd; +} __packed; + +/** + * struct opa_veswport_mactable - Forwarding table array + * @offset: mac table starting offset + * @num_entries: Number of entries to get or set + * @mac_tbl_digest: mac table digest + * @tbl_entries[]: Array of table entries + * + * The EM sends down this structure in a MAD indicating + * the starting offset in the forwarding table that this + * entry is to be loaded into and the number of entries + * that that this MAD instance contains + * The mac_tbl_digest has been added to this MAD structure. It will be set by + * the EM and it will be used by the EM to check if there are any + * discrepancies with this value and the value + * maintained by the EM in the case of VNIC port being deleted or unloaded + * A new instantiation of a VNIC will always have a value of zero. + * This value is stored as part of the vnic adapter structure and will be + * accessed by the GET and SET routines for both the mactable entries and the + * veswport info. 
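As an aside, the dlid_sd word carried in each mactable entry decodes with the OPA_VNIC_DLID_SD_* helpers defined earlier in this header. A standalone, userspace-style sketch (not part of the patch; the sample value is invented, and a real consumer would first convert the big-endian field to host order):

#include <stdint.h>
#include <stdio.h>

/* Local copies of the two helpers from opa_vnic_encap.h, for illustration. */
#define OPA_VNIC_DLID_SD_IS_SRC_MAC(dlid_sd)  (!!((dlid_sd) & 0x20))
#define OPA_VNIC_DLID_SD_GET_DLID(dlid_sd)    ((dlid_sd) >> 8)

int main(void)
{
	/* hypothetical host-order dlid_sd: DLID 0x1234, "is source MAC" bit set */
	uint32_t dlid_sd = (0x1234u << 8) | 0x20;

	printf("dlid = 0x%x, src-mac flag = %u\n",
	       (unsigned int)OPA_VNIC_DLID_SD_GET_DLID(dlid_sd),
	       (unsigned int)OPA_VNIC_DLID_SD_IS_SRC_MAC(dlid_sd));
	return 0;
}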
+ */ +struct opa_veswport_mactable { + __be16 offset; + __be16 num_entries; + __be32 mac_tbl_digest; + struct opa_veswport_mactable_entry tbl_entries[0]; +} __packed; + +/** + * struct opa_veswport_summary_counters - summary counters + * @vp_instance: vport instance on the OPA port + * @vesw_id: virtual ethernet switch id + * @veswport_num: virtual ethernet switch port number + * @tx_errors: transmit errors + * @rx_errors: receive errors + * @tx_packets: transmit packets + * @rx_packets: receive packets + * @tx_bytes: transmit bytes + * @rx_bytes: receive bytes + * @tx_unicast: unicast packets transmitted + * @tx_mcastbcast: multicast/broadcast packets transmitted + * @tx_untagged: non-vlan packets transmitted + * @tx_vlan: vlan packets transmitted + * @tx_64_size: transmit packet length is 64 bytes + * @tx_65_127: transmit packet length is >=65 and < 127 bytes + * @tx_128_255: transmit packet length is >=128 and < 255 bytes + * @tx_256_511: transmit packet length is >=256 and < 511 bytes + * @tx_512_1023: transmit packet length is >=512 and < 1023 bytes + * @tx_1024_1518: transmit packet length is >=1024 and < 1518 bytes + * @tx_1519_max: transmit packet length >= 1519 bytes + * @rx_unicast: unicast packets received + * @rx_mcastbcast: multicast/broadcast packets received + * @rx_untagged: non-vlan packets received + * @rx_vlan: vlan packets received + * @rx_64_size: received packet length is 64 bytes + * @rx_65_127: received packet length is >=65 and < 127 bytes + * @rx_128_255: received packet length is >=128 and < 255 bytes + * @rx_256_511: received packet length is >=256 and < 511 bytes + * @rx_512_1023: received packet length is >=512 and < 1023 bytes + * @rx_1024_1518: received packet length is >=1024 and < 1518 bytes + * @rx_1519_max: received packet length >= 1519 bytes + * + * All the above are counters of corresponding conditions. + */ +struct opa_veswport_summary_counters { + __be16 vp_instance; + __be16 vesw_id; + __be32 veswport_num; + + __be64 tx_errors; + __be64 rx_errors; + __be64 tx_packets; + __be64 rx_packets; + __be64 tx_bytes; + __be64 rx_bytes; + + __be64 tx_unicast; + __be64 tx_mcastbcast; + + __be64 tx_untagged; + __be64 tx_vlan; + + __be64 tx_64_size; + __be64 tx_65_127; + __be64 tx_128_255; + __be64 tx_256_511; + __be64 tx_512_1023; + __be64 tx_1024_1518; + __be64 tx_1519_max; + + __be64 rx_unicast; + __be64 rx_mcastbcast; + + __be64 rx_untagged; + __be64 rx_vlan; + + __be64 rx_64_size; + __be64 rx_65_127; + __be64 rx_128_255; + __be64 rx_256_511; + __be64 rx_512_1023; + __be64 rx_1024_1518; + __be64 rx_1519_max; + + __be64 reserved[16]; +} __packed; + +/** + * struct opa_veswport_error_counters - error counters + * @vp_instance: vport instance on the OPA port + * @vesw_id: virtual ethernet switch id + * @veswport_num: virtual ethernet switch port number + * @tx_errors: transmit errors + * @rx_errors: receive errors + * @tx_smac_filt: smac filter errors + * @tx_dlid_zero: transmit packets with invalid dlid + * @tx_logic: other transmit errors + * @tx_drop_state: packet tansmission in non-forward port state + * @rx_bad_veswid: received packet with invalid vesw id + * @rx_runt: received ethernet packet with length < 64 bytes + * @rx_oversize: received ethernet packet with length > MTU size + * @rx_eth_down: received packets when interface is down + * @rx_drop_state: received packets in non-forwarding port state + * @rx_logic: other receive errors + * + * All the above are counters of corresponding erorr conditions. 
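The per-size counters above bucket frames by length (64, 65-127, 128-255, ..., 1519 and up). A small standalone sketch of that bucketing, with the boundary handling simplified; undersized frames are really tracked by the rx_runt error counter rather than a size bucket:

#include <stdio.h>

/* Bucket names follow the tx/rx size counters documented above. */
static const char *size_bucket(unsigned int len)
{
	if (len < 64)
		return "under_64";   /* undersized; counted as rx_runt, not a size bucket */
	if (len == 64)
		return "64_size";
	if (len <= 127)
		return "65_127";
	if (len <= 255)
		return "128_255";
	if (len <= 511)
		return "256_511";
	if (len <= 1023)
		return "512_1023";
	if (len <= 1518)
		return "1024_1518";
	return "1519_max";
}

int main(void)
{
	unsigned int samples[] = { 64, 200, 1500, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("len %-5u -> s_%s\n", samples[i], size_bucket(samples[i]));
	return 0;
}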
+ */ +struct opa_veswport_error_counters { + __be16 vp_instance; + __be16 vesw_id; + __be32 veswport_num; + + __be64 tx_errors; + __be64 rx_errors; + + __be64 rsvd0; + __be64 tx_smac_filt; + __be64 rsvd1; + __be64 rsvd2; + __be64 rsvd3; + __be64 tx_dlid_zero; + __be64 rsvd4; + __be64 tx_logic; + __be64 rsvd5; + __be64 tx_drop_state; + + __be64 rx_bad_veswid; + __be64 rsvd6; + __be64 rx_runt; + __be64 rx_oversize; + __be64 rsvd7; + __be64 rx_eth_down; + __be64 rx_drop_state; + __be64 rx_logic; + __be64 rsvd8; + + __be64 rsvd9[16]; +} __packed; + +/** + * struct opa_veswport_trap - Trap message sent to EM by VNIC + * @fabric_id: 10 bit fabric id + * @veswid: 12 bit virtual ethernet switch id + * @veswportnum: logical port number on the Virtual switch + * @opaportnum: physical port num (redundant on host) + * @veswportindex: switch port index on opa port 0 based + * @opcode: operation + * @reserved: 32 bit for alignment + * + * The VNIC will send trap messages to the Ethernet manager to + * inform it about changes to the VNIC config, behaviour etc. + * This is the format of the trap payload. + */ +struct opa_veswport_trap { + __be16 fabric_id; + __be16 veswid; + __be32 veswportnum; + __be16 opaportnum; + u8 veswportindex; + u8 opcode; + __be32 reserved; +} __packed; + +/** + * struct opa_vnic_iface_macs_entry - single entry in the mac list + * @mac_addr: MAC address + */ +struct opa_vnic_iface_mac_entry { + u8 mac_addr[ETH_ALEN]; +}; + +/** + * struct opa_veswport_iface_macs - Msg to set globally administered MAC + * @start_idx: position of first entry (0 based) + * @num_macs_in_msg: number of MACs in this message + * @tot_macs_in_lst: The total number of MACs the agent has + * @gen_count: gen_count to indicate change + * @entry: The mac list entry + * + * Same attribute IDS and attribute modifiers as in locally administered + * addresses used to set globally administered addresses + */ +struct opa_veswport_iface_macs { + __be16 start_idx; + __be16 num_macs_in_msg; + __be16 tot_macs_in_lst; + __be16 gen_count; + struct opa_vnic_iface_mac_entry entry[0]; +} __packed; + +/** + * struct opa_vnic_vema_mad - Generic VEMA MAD + * @mad_hdr: Generic MAD header + * @rmpp_hdr: RMPP header for vendor specific MADs + * @oui: Unique org identifier + * @data: MAD data + */ +struct opa_vnic_vema_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 reserved; + u8 oui[3]; + u8 data[OPA_VNIC_EMA_DATA]; +}; + +/** + * struct opa_vnic_notice_attr - Generic Notice MAD + * @gen_type: Generic/Specific bit and type of notice + * @oui_1: Vendor ID byte 1 + * @oui_2: Vendor ID byte 2 + * @oui_3: Vendor ID byte 3 + * @trap_num: Trap number + * @toggle_count: Notice toggle bit and count value + * @issuer_lid: Trap issuer's lid + * @issuer_gid: Issuer GID (only if Report method) + * @raw_data: Trap message body + */ +struct opa_vnic_notice_attr { + u8 gen_type; + u8 oui_1; + u8 oui_2; + u8 oui_3; + __be16 trap_num; + __be16 toggle_count; + __be32 issuer_lid; + __be32 reserved; + u8 issuer_gid[16]; + u8 raw_data[64]; +} __packed; + +/** + * struct opa_vnic_vema_mad_trap - Generic VEMA MAD Trap + * @mad_hdr: Generic MAD header + * @rmpp_hdr: RMPP header for vendor specific MADs + * @oui: Unique org identifier + * @notice: Notice structure + */ +struct opa_vnic_vema_mad_trap { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 reserved; + u8 oui[3]; + struct opa_vnic_notice_attr notice; +}; + +#endif /* _OPA_VNIC_ENCAP_H */ diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c 
b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c new file mode 100644 index 000000000..62390e9e0 --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c @@ -0,0 +1,187 @@ +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
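The ethtool code in this file (shown below) exports statistics through a name/size/offset table built with offsetof(), so a single loop can copy out heterogeneous counter fields. A compact standalone sketch of that table-driven pattern, with hypothetical struct and field names:

#include <stddef.h>   /* offsetof */
#include <stdint.h>
#include <stdio.h>

struct demo_stats {            /* hypothetical stats block */
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint32_t rx_dropped;
};

struct stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define DEMO_STAT(m) { #m, sizeof(((struct demo_stats *)0)->m), \
		       offsetof(struct demo_stats, m) }

static const struct stat_desc demo_tbl[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(tx_packets),
	DEMO_STAT(rx_dropped),
};

int main(void)
{
	struct demo_stats s = { .rx_packets = 10, .tx_packets = 20, .rx_dropped = 1 };
	size_t i;

	for (i = 0; i < sizeof(demo_tbl) / sizeof(demo_tbl[0]); i++) {
		const char *p = (const char *)&s + demo_tbl[i].offset;
		uint64_t v = (demo_tbl[i].size == sizeof(uint64_t)) ?
			     *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%-12s %llu\n", demo_tbl[i].name, (unsigned long long)v);
	}
	return 0;
}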
+ * + */ + +/* + * This file contains OPA VNIC ethtool functions + */ + +#include <linux/ethtool.h> + +#include "opa_vnic_internal.h" + +enum {NETDEV_STATS, VNIC_STATS}; + +struct vnic_stats { + char stat_string[ETH_GSTRING_LEN]; + struct { + int sizeof_stat; + int stat_offset; + }; +}; + +#define VNIC_STAT(m) { FIELD_SIZEOF(struct opa_vnic_stats, m), \ + offsetof(struct opa_vnic_stats, m) } + +static struct vnic_stats vnic_gstrings_stats[] = { + /* NETDEV stats */ + {"rx_packets", VNIC_STAT(netstats.rx_packets)}, + {"tx_packets", VNIC_STAT(netstats.tx_packets)}, + {"rx_bytes", VNIC_STAT(netstats.rx_bytes)}, + {"tx_bytes", VNIC_STAT(netstats.tx_bytes)}, + {"rx_errors", VNIC_STAT(netstats.rx_errors)}, + {"tx_errors", VNIC_STAT(netstats.tx_errors)}, + {"rx_dropped", VNIC_STAT(netstats.rx_dropped)}, + {"tx_dropped", VNIC_STAT(netstats.tx_dropped)}, + + /* SUMMARY counters */ + {"tx_unicast", VNIC_STAT(tx_grp.unicast)}, + {"tx_mcastbcast", VNIC_STAT(tx_grp.mcastbcast)}, + {"tx_untagged", VNIC_STAT(tx_grp.untagged)}, + {"tx_vlan", VNIC_STAT(tx_grp.vlan)}, + + {"tx_64_size", VNIC_STAT(tx_grp.s_64)}, + {"tx_65_127", VNIC_STAT(tx_grp.s_65_127)}, + {"tx_128_255", VNIC_STAT(tx_grp.s_128_255)}, + {"tx_256_511", VNIC_STAT(tx_grp.s_256_511)}, + {"tx_512_1023", VNIC_STAT(tx_grp.s_512_1023)}, + {"tx_1024_1518", VNIC_STAT(tx_grp.s_1024_1518)}, + {"tx_1519_max", VNIC_STAT(tx_grp.s_1519_max)}, + + {"rx_unicast", VNIC_STAT(rx_grp.unicast)}, + {"rx_mcastbcast", VNIC_STAT(rx_grp.mcastbcast)}, + {"rx_untagged", VNIC_STAT(rx_grp.untagged)}, + {"rx_vlan", VNIC_STAT(rx_grp.vlan)}, + + {"rx_64_size", VNIC_STAT(rx_grp.s_64)}, + {"rx_65_127", VNIC_STAT(rx_grp.s_65_127)}, + {"rx_128_255", VNIC_STAT(rx_grp.s_128_255)}, + {"rx_256_511", VNIC_STAT(rx_grp.s_256_511)}, + {"rx_512_1023", VNIC_STAT(rx_grp.s_512_1023)}, + {"rx_1024_1518", VNIC_STAT(rx_grp.s_1024_1518)}, + {"rx_1519_max", VNIC_STAT(rx_grp.s_1519_max)}, + + /* ERROR counters */ + {"rx_fifo_errors", VNIC_STAT(netstats.rx_fifo_errors)}, + {"rx_length_errors", VNIC_STAT(netstats.rx_length_errors)}, + + {"tx_fifo_errors", VNIC_STAT(netstats.tx_fifo_errors)}, + {"tx_carrier_errors", VNIC_STAT(netstats.tx_carrier_errors)}, + + {"tx_dlid_zero", VNIC_STAT(tx_dlid_zero)}, + {"tx_drop_state", VNIC_STAT(tx_drop_state)}, + {"rx_drop_state", VNIC_STAT(rx_drop_state)}, + {"rx_oversize", VNIC_STAT(rx_oversize)}, + {"rx_runt", VNIC_STAT(rx_runt)}, +}; + +#define VNIC_STATS_LEN ARRAY_SIZE(vnic_gstrings_stats) + +/* vnic_get_drvinfo - get driver info */ +static void vnic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, opa_vnic_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent), + sizeof(drvinfo->bus_info)); +} + +/* vnic_get_sset_count - get string set count */ +static int vnic_get_sset_count(struct net_device *netdev, int sset) +{ + return (sset == ETH_SS_STATS) ? 
VNIC_STATS_LEN : -EOPNOTSUPP; +} + +/* vnic_get_ethtool_stats - get statistics */ +static void vnic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + struct opa_vnic_stats vstats; + int i; + + memset(&vstats, 0, sizeof(vstats)); + spin_lock(&adapter->stats_lock); + adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats); + spin_unlock(&adapter->stats_lock); + for (i = 0; i < VNIC_STATS_LEN; i++) { + char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset; + + data[i] = (vnic_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +/* vnic_get_strings - get strings */ +static void vnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < VNIC_STATS_LEN; i++) + memcpy(data + i * ETH_GSTRING_LEN, + vnic_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); +} + +/* ethtool ops */ +static const struct ethtool_ops opa_vnic_ethtool_ops = { + .get_drvinfo = vnic_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = vnic_get_strings, + .get_sset_count = vnic_get_sset_count, + .get_ethtool_stats = vnic_get_ethtool_stats, +}; + +/* opa_vnic_set_ethtool_ops - set ethtool ops */ +void opa_vnic_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &opa_vnic_ethtool_ops; +} diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h new file mode 100644 index 000000000..43ac61ffe --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h @@ -0,0 +1,330 @@ +#ifndef _OPA_VNIC_INTERNAL_H +#define _OPA_VNIC_INTERNAL_H +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * This file contains OPA VNIC driver internal declarations + */ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/hashtable.h> +#include <linux/sizes.h> +#include <rdma/opa_vnic.h> + +#include "opa_vnic_encap.h" + +#define OPA_VNIC_VLAN_PCP(vlan_tci) \ + (((vlan_tci) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) + +/* Flow to default port redirection table size */ +#define OPA_VNIC_FLOW_TBL_SIZE 32 + +/* Invalid port number */ +#define OPA_VNIC_INVALID_PORT 0xff + +struct opa_vnic_adapter; + +/** + * struct __opa_vesw_info - OPA vnic virtual switch info + * + * Same as opa_vesw_info without bitwise attribute. + */ +struct __opa_vesw_info { + u16 fabric_id; + u16 vesw_id; + + u8 rsvd0[6]; + u16 def_port_mask; + + u8 rsvd1[2]; + u16 pkey; + + u8 rsvd2[4]; + u32 u_mcast_dlid; + u32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT]; + + u32 rc; + + u8 rsvd3[56]; + u16 eth_mtu; + u8 rsvd4[2]; +} __packed; + +/** + * struct __opa_per_veswport_info - OPA vnic per port info + * + * Same as opa_per_veswport_info without bitwise attribute. + */ +struct __opa_per_veswport_info { + u32 port_num; + + u8 eth_link_status; + u8 rsvd0[3]; + + u8 base_mac_addr[ETH_ALEN]; + u8 config_state; + u8 oper_state; + + u16 max_mac_tbl_ent; + u16 max_smac_ent; + u32 mac_tbl_digest; + u8 rsvd1[4]; + + u32 encap_slid; + + u8 pcp_to_sc_uc[OPA_VNIC_MAX_NUM_PCP]; + u8 pcp_to_vl_uc[OPA_VNIC_MAX_NUM_PCP]; + u8 pcp_to_sc_mc[OPA_VNIC_MAX_NUM_PCP]; + u8 pcp_to_vl_mc[OPA_VNIC_MAX_NUM_PCP]; + + u8 non_vlan_sc_uc; + u8 non_vlan_vl_uc; + u8 non_vlan_sc_mc; + u8 non_vlan_vl_mc; + + u8 rsvd2[48]; + + u16 uc_macs_gen_count; + u16 mc_macs_gen_count; + + u8 rsvd3[8]; +} __packed; + +/** + * struct __opa_veswport_info - OPA vnic port info + * + * Same as opa_veswport_info without bitwise attribute. + */ +struct __opa_veswport_info { + struct __opa_vesw_info vesw; + struct __opa_per_veswport_info vport; +}; + +/** + * struct __opa_veswport_trap - OPA vnic trap info + * + * Same as opa_veswport_trap without bitwise attribute. 
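These __opa_* mirrors differ from the wire structures only in dropping the __beXX annotations, which implies a field-by-field byte-order conversion somewhere in the driver. A trimmed, userspace-style sketch of that conversion for two fields (struct contents and values are stand-ins, using glibc's <endian.h> helpers):

#include <endian.h>   /* htobe16/be16toh (glibc) */
#include <stdint.h>
#include <stdio.h>

struct wire_vesw_info {        /* trimmed stand-in for opa_vesw_info */
	uint16_t fabric_id;    /* __be16 on the wire */
	uint16_t vesw_id;      /* __be16 on the wire */
};

struct host_vesw_info {        /* trimmed stand-in for __opa_vesw_info */
	uint16_t fabric_id;
	uint16_t vesw_id;
};

static void vesw_info_to_host(const struct wire_vesw_info *src,
			      struct host_vesw_info *dst)
{
	dst->fabric_id = be16toh(src->fabric_id);
	dst->vesw_id   = be16toh(src->vesw_id);
}

int main(void)
{
	struct wire_vesw_info w = { htobe16(0x2a), htobe16(0x7) };
	struct host_vesw_info h;

	vesw_info_to_host(&w, &h);
	printf("fabric_id=%u vesw_id=%u\n", h.fabric_id, h.vesw_id);
	return 0;
}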
+ */ +struct __opa_veswport_trap { + u16 fabric_id; + u16 veswid; + u32 veswportnum; + u16 opaportnum; + u8 veswportindex; + u8 opcode; + u32 reserved; +} __packed; + +/** + * struct opa_vnic_ctrl_port - OPA virtual NIC control port + * @ibdev: pointer to ib device + * @ops: opa vnic control operations + * @num_ports: number of opa ports + */ +struct opa_vnic_ctrl_port { + struct ib_device *ibdev; + struct opa_vnic_ctrl_ops *ops; + u8 num_ports; +}; + +/** + * struct opa_vnic_adapter - OPA VNIC netdev private data structure + * @netdev: pointer to associated netdev + * @ibdev: ib device + * @cport: pointer to opa vnic control port + * @rn_ops: rdma netdev's net_device_ops + * @port_num: OPA port number + * @vport_num: vesw port number + * @lock: adapter lock + * @info: virtual ethernet switch port information + * @vema_mac_addr: mac address configured by vema + * @umac_hash: unicast maclist hash + * @mmac_hash: multicast maclist hash + * @mactbl: hash table of MAC entries + * @mactbl_lock: mac table lock + * @stats_lock: statistics lock + * @flow_tbl: flow to default port redirection table + * @trap_timeout: trap timeout + * @trap_count: no. of traps allowed within timeout period + */ +struct opa_vnic_adapter { + struct net_device *netdev; + struct ib_device *ibdev; + struct opa_vnic_ctrl_port *cport; + const struct net_device_ops *rn_ops; + + u8 port_num; + u8 vport_num; + + /* Lock used around concurrent updates to netdev */ + struct mutex lock; + + struct __opa_veswport_info info; + u8 vema_mac_addr[ETH_ALEN]; + u32 umac_hash; + u32 mmac_hash; + struct hlist_head __rcu *mactbl; + + /* Lock used to protect updates to mac table */ + struct mutex mactbl_lock; + + /* Lock used to protect access to vnic counters */ + spinlock_t stats_lock; + + u8 flow_tbl[OPA_VNIC_FLOW_TBL_SIZE]; + + unsigned long trap_timeout; + u8 trap_count; +}; + +/* Same as opa_veswport_mactable_entry, but without bitwise attribute */ +struct __opa_vnic_mactable_entry { + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; + u32 dlid_sd; +} __packed; + +/** + * struct opa_vnic_mac_tbl_node - OPA VNIC mac table node + * @hlist: hash list handle + * @index: index of entry in the mac table + * @entry: entry in the table + */ +struct opa_vnic_mac_tbl_node { + struct hlist_node hlist; + u16 index; + struct __opa_vnic_mactable_entry entry; +}; + +#define v_dbg(format, arg...) \ + netdev_dbg(adapter->netdev, format, ## arg) +#define v_err(format, arg...) \ + netdev_err(adapter->netdev, format, ## arg) +#define v_info(format, arg...) \ + netdev_info(adapter->netdev, format, ## arg) +#define v_warn(format, arg...) \ + netdev_warn(adapter->netdev, format, ## arg) + +#define c_err(format, arg...) \ + dev_err(&cport->ibdev->dev, format, ## arg) +#define c_info(format, arg...) \ + dev_info(&cport->ibdev->dev, format, ## arg) +#define c_dbg(format, arg...) 
\ + dev_dbg(&cport->ibdev->dev, format, ## arg) + +/* The maximum allowed entries in the mac table */ +#define OPA_VNIC_MAC_TBL_MAX_ENTRIES 2048 +/* Limit of smac entries in mac table */ +#define OPA_VNIC_MAX_SMAC_LIMIT 256 + +/* The last octet of the MAC address is used as the key to the hash table */ +#define OPA_VNIC_MAC_HASH_IDX 5 + +/* The VNIC MAC hash table is of size 2^8 */ +#define OPA_VNIC_MAC_TBL_HASH_BITS 8 +#define OPA_VNIC_MAC_TBL_SIZE BIT(OPA_VNIC_MAC_TBL_HASH_BITS) + +/* VNIC HASH MACROS */ +#define vnic_hash_init(hashtable) __hash_init(hashtable, OPA_VNIC_MAC_TBL_SIZE) + +#define vnic_hash_add(hashtable, node, key) \ + hlist_add_head(node, \ + &hashtable[hash_min(key, ilog2(OPA_VNIC_MAC_TBL_SIZE))]) + +#define vnic_hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; \ + !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#define vnic_hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, \ + &name[hash_min(key, ilog2(OPA_VNIC_MAC_TBL_SIZE))], member) + +#define vnic_hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; \ + !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \ + hlist_for_each_entry(obj, &name[bkt], member) + +extern char opa_vnic_driver_name[]; +extern const char opa_vnic_driver_version[]; + +struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, + u8 port_num, u8 vport_num); +void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter); +void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb); +u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb); +u8 opa_vnic_calc_entropy(struct sk_buff *skb); +void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter); +void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter); +void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter, + struct opa_veswport_mactable *tbl); +int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter, + struct opa_veswport_mactable *tbl); +void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, + struct opa_veswport_iface_macs *macs); +void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter, + struct opa_veswport_iface_macs *macs); +void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter, + struct opa_veswport_summary_counters *cntrs); +void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter, + struct opa_veswport_error_counters *cntrs); +void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter, + struct opa_vesw_info *info); +void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter, + struct opa_vesw_info *info); +void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter, + struct opa_per_veswport_info *info); +void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter, + struct opa_per_veswport_info *info); +void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event); +void opa_vnic_set_ethtool_ops(struct net_device *netdev); +void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter, + struct __opa_veswport_trap *data, u32 lid); + +#endif /* _OPA_VNIC_INTERNAL_H */ diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c new file mode 100644 index 000000000..61558788b --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c @@ -0,0 +1,403 @@ +/* + * Copyright(c) 2017 Intel Corporation. 
+ * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * This file contains OPA Virtual Network Interface Controller (VNIC) driver + * netdev functionality. 
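The transmit handler later in this file pads runt frames up to the minimum Ethernet length (ETH_ZLEN, 60 bytes) before prepending the OPA encapsulation. A standalone sketch of that padding step on a plain buffer, outside any skb handling:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60   /* minimum Ethernet frame length, excluding FCS */

/* Zero-pad a frame held in buf[0..*len) up to ETH_ZLEN if it is shorter. */
static void pad_runt(unsigned char *buf, unsigned int *len)
{
	if (*len < ETH_ZLEN) {
		memset(buf + *len, 0, ETH_ZLEN - *len);
		*len = ETH_ZLEN;
	}
}

int main(void)
{
	unsigned char frame[1518] = { 0 };
	unsigned int len = 42;          /* hypothetical short frame */

	pad_runt(frame, &len);
	printf("padded length = %u\n", len);
	return 0;
}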
+ */ + +#include <linux/module.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> + +#include "opa_vnic_internal.h" + +#define OPA_TX_TIMEOUT_MS 1000 + +#define OPA_VNIC_SKB_HEADROOM \ + ALIGN((OPA_VNIC_HDR_LEN + OPA_VNIC_SKB_MDATA_LEN), 8) + +/* This function is overloaded for opa_vnic specific implementation */ +static void opa_vnic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + struct opa_vnic_stats vstats; + + memset(&vstats, 0, sizeof(vstats)); + spin_lock(&adapter->stats_lock); + adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats); + spin_unlock(&adapter->stats_lock); + memcpy(stats, &vstats.netstats, sizeof(*stats)); +} + +/* opa_netdev_start_xmit - transmit function */ +static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + + v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len); + /* pad to ensure mininum ethernet packet length */ + if (unlikely(skb->len < ETH_ZLEN)) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + skb_put(skb, ETH_ZLEN - skb->len); + } + + opa_vnic_encap_skb(adapter, skb); + return adapter->rn_ops->ndo_start_xmit(skb, netdev); +} + +static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + struct opa_vnic_skb_mdata *mdata; + int rc; + + /* pass entropy and vl as metadata in skb */ + mdata = skb_push(skb, sizeof(*mdata)); + mdata->entropy = opa_vnic_calc_entropy(skb); + mdata->vl = opa_vnic_get_vl(adapter, skb); + rc = adapter->rn_ops->ndo_select_queue(netdev, skb, + sb_dev, fallback); + skb_pull(skb, sizeof(*mdata)); + return rc; +} + +static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up) +{ + struct __opa_veswport_info *info = &adapter->info; + + mutex_lock(&adapter->lock); + /* Operational state can only be DROP_ALL or FORWARDING */ + if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) { + info->vport.oper_state = OPA_VNIC_STATE_FORWARDING; + info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP; + } else { + info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL; + info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN; + } + + if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING) + netif_dormant_off(adapter->netdev); + else + netif_dormant_on(adapter->netdev); + mutex_unlock(&adapter->lock); +} + +/* opa_vnic_process_vema_config - process vema configuration updates */ +void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter) +{ + struct __opa_veswport_info *info = &adapter->info; + struct rdma_netdev *rn = netdev_priv(adapter->netdev); + u8 port_num[OPA_VESW_MAX_NUM_DEF_PORT] = { 0 }; + struct net_device *netdev = adapter->netdev; + u8 i, port_count = 0; + u16 port_mask; + + /* If the base_mac_addr is changed, update the interface mac address */ + if (memcmp(info->vport.base_mac_addr, adapter->vema_mac_addr, + ARRAY_SIZE(info->vport.base_mac_addr))) { + struct sockaddr saddr; + + memcpy(saddr.sa_data, info->vport.base_mac_addr, + ARRAY_SIZE(info->vport.base_mac_addr)); + mutex_lock(&adapter->lock); + eth_commit_mac_addr_change(netdev, &saddr); + memcpy(adapter->vema_mac_addr, + info->vport.base_mac_addr, ETH_ALEN); + mutex_unlock(&adapter->lock); + } + + rn->set_id(netdev, info->vesw.vesw_id); + + /* Handle MTU limit change */ + 
rtnl_lock(); + netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu, + netdev->min_mtu); + if (netdev->mtu > netdev->max_mtu) + dev_set_mtu(netdev, netdev->max_mtu); + rtnl_unlock(); + + /* Update flow to default port redirection table */ + port_mask = info->vesw.def_port_mask; + for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) { + if (port_mask & 1) + port_num[port_count++] = i; + port_mask >>= 1; + } + + /* + * Build the flow table. Flow table is required when destination LID + * is not available. Up to OPA_VNIC_FLOW_TBL_SIZE flows supported. + * Each flow need a default port number to get its dlid from the + * u_ucast_dlid array. + */ + for (i = 0; i < OPA_VNIC_FLOW_TBL_SIZE; i++) + adapter->flow_tbl[i] = port_count ? port_num[i % port_count] : + OPA_VNIC_INVALID_PORT; + + /* update state */ + opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP)); +} + +/* + * Set the power on default values in adapter's vema interface structure. + */ +static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter) +{ + adapter->info.vport.max_mac_tbl_ent = OPA_VNIC_MAC_TBL_MAX_ENTRIES; + adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT; + adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL; + adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN; + adapter->info.vesw.eth_mtu = ETH_DATA_LEN; +} + +/* opa_vnic_set_mac_addr - change mac address */ +static int opa_vnic_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + struct sockaddr *sa = addr; + int rc; + + if (!memcmp(netdev->dev_addr, sa->sa_data, ETH_ALEN)) + return 0; + + mutex_lock(&adapter->lock); + rc = eth_mac_addr(netdev, addr); + mutex_unlock(&adapter->lock); + if (rc) + return rc; + + adapter->info.vport.uc_macs_gen_count++; + opa_vnic_vema_report_event(adapter, + OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE); + return 0; +} + +/* + * opa_vnic_mac_send_event - post event on possible mac list exchange + * Send trap when digest from uc/mc mac list differs from previous run. + * Digest is evaluated similar to how cksum does. 
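The helper that follows detects unicast/multicast list changes by folding the addresses and the list byte count through crc32_le() and comparing against the previous digest. A self-contained sketch of the same idea, with a local reflected CRC-32 routine standing in for the kernel's crc32_le() and a made-up address list:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Minimal reflected CRC-32 (poly 0xEDB88320), update-style like crc32_le(). */
static uint32_t crc32_le_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		int i;

		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

/* Digest a MAC list as described above: CRC over each address, then over
 * the total byte count, finally inverted. */
static uint32_t mac_list_digest(uint8_t (*macs)[ETH_ALEN], uint32_t n)
{
	uint32_t crc = 0, i, l = n * ETH_ALEN;

	for (i = 0; i < n; i++)
		crc = crc32_le_update(crc, macs[i], ETH_ALEN);
	return ~crc32_le_update(crc, (const uint8_t *)&l, sizeof(l));
}

int main(void)
{
	uint8_t list[2][ETH_ALEN] = {
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },   /* made-up addresses */
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	};
	uint32_t ref = 0, crc;

	crc = mac_list_digest(list, 2);
	if (crc != ref) {
		ref = crc;   /* the driver would report a trap at this point */
		printf("list changed, new digest 0x%08x\n", crc);
	}
	return 0;
}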
+ */ +static void opa_vnic_mac_send_event(struct net_device *netdev, u8 event) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + struct netdev_hw_addr *ha; + struct netdev_hw_addr_list *hw_list; + u32 *ref_crc; + u32 l, crc = 0; + + switch (event) { + case OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE: + hw_list = &netdev->uc; + adapter->info.vport.uc_macs_gen_count++; + ref_crc = &adapter->umac_hash; + break; + case OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE: + hw_list = &netdev->mc; + adapter->info.vport.mc_macs_gen_count++; + ref_crc = &adapter->mmac_hash; + break; + default: + return; + } + netdev_hw_addr_list_for_each(ha, hw_list) { + crc = crc32_le(crc, ha->addr, ETH_ALEN); + } + l = netdev_hw_addr_list_count(hw_list) * ETH_ALEN; + crc = ~crc32_le(crc, (void *)&l, sizeof(l)); + + if (crc != *ref_crc) { + *ref_crc = crc; + opa_vnic_vema_report_event(adapter, event); + } +} + +/* opa_vnic_set_rx_mode - handle uc/mc mac list change */ +static void opa_vnic_set_rx_mode(struct net_device *netdev) +{ + opa_vnic_mac_send_event(netdev, + OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE); + + opa_vnic_mac_send_event(netdev, + OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE); +} + +/* opa_netdev_open - activate network interface */ +static int opa_netdev_open(struct net_device *netdev) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + int rc; + + rc = adapter->rn_ops->ndo_open(adapter->netdev); + if (rc) { + v_dbg("open failed %d\n", rc); + return rc; + } + + /* Update status and send trap */ + opa_vnic_update_state(adapter, true); + opa_vnic_vema_report_event(adapter, + OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE); + return 0; +} + +/* opa_netdev_close - disable network interface */ +static int opa_netdev_close(struct net_device *netdev) +{ + struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); + int rc; + + rc = adapter->rn_ops->ndo_stop(adapter->netdev); + if (rc) { + v_dbg("close failed %d\n", rc); + return rc; + } + + /* Update status and send trap */ + opa_vnic_update_state(adapter, false); + opa_vnic_vema_report_event(adapter, + OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE); + return 0; +} + +/* netdev ops */ +static const struct net_device_ops opa_netdev_ops = { + .ndo_open = opa_netdev_open, + .ndo_stop = opa_netdev_close, + .ndo_start_xmit = opa_netdev_start_xmit, + .ndo_get_stats64 = opa_vnic_get_stats64, + .ndo_set_rx_mode = opa_vnic_set_rx_mode, + .ndo_select_queue = opa_vnic_select_queue, + .ndo_set_mac_address = opa_vnic_set_mac_addr, +}; + +/* opa_vnic_add_netdev - create vnic netdev interface */ +struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, + u8 port_num, u8 vport_num) +{ + struct opa_vnic_adapter *adapter; + struct net_device *netdev; + struct rdma_netdev *rn; + int rc; + + netdev = ibdev->alloc_rdma_netdev(ibdev, port_num, + RDMA_NETDEV_OPA_VNIC, + "veth%d", NET_NAME_UNKNOWN, + ether_setup); + if (!netdev) + return ERR_PTR(-ENOMEM); + else if (IS_ERR(netdev)) + return ERR_CAST(netdev); + + rn = netdev_priv(netdev); + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + rc = -ENOMEM; + goto adapter_err; + } + + rn->clnt_priv = adapter; + rn->hca = ibdev; + rn->port_num = port_num; + adapter->netdev = netdev; + adapter->ibdev = ibdev; + adapter->port_num = port_num; + adapter->vport_num = vport_num; + adapter->rn_ops = netdev->netdev_ops; + + netdev->netdev_ops = &opa_netdev_ops; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM; + mutex_init(&adapter->lock); + 
mutex_init(&adapter->mactbl_lock); + spin_lock_init(&adapter->stats_lock); + + SET_NETDEV_DEV(netdev, ibdev->dev.parent); + + opa_vnic_set_ethtool_ops(netdev); + + opa_vnic_set_pod_values(adapter); + + rc = register_netdev(netdev); + if (rc) + goto netdev_err; + + netif_carrier_off(netdev); + netif_dormant_on(netdev); + v_info("initialized\n"); + + return adapter; +netdev_err: + mutex_destroy(&adapter->lock); + mutex_destroy(&adapter->mactbl_lock); + kfree(adapter); +adapter_err: + rn->free_rdma_netdev(netdev); + + return ERR_PTR(rc); +} + +/* opa_vnic_rem_netdev - remove vnic netdev interface */ +void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rdma_netdev *rn = netdev_priv(netdev); + + v_info("removing\n"); + unregister_netdev(netdev); + opa_vnic_release_mac_tbl(adapter); + mutex_destroy(&adapter->lock); + mutex_destroy(&adapter->mactbl_lock); + kfree(adapter); + rn->free_rdma_netdev(netdev); +} diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c new file mode 100644 index 000000000..15711dcc6 --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -0,0 +1,1076 @@ +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
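The error path above releases resources in reverse order through labelled goto targets, so each failure undoes only what already succeeded. A minimal standalone sketch of that unwind shape with two hypothetical allocations:

#include <stdio.h>
#include <stdlib.h>

/* Two-step setup in the same unwind style: a failure at step two releases
 * only what step one acquired, then reports the error. */
static int demo_setup(char **a, char **b)
{
	*a = malloc(32);
	if (!*a)
		return -1;

	*b = malloc(64);
	if (!*b)
		goto a_err;

	return 0;

a_err:
	free(*a);
	*a = NULL;
	return -1;
}

int main(void)
{
	char *a, *b;

	if (demo_setup(&a, &b))
		return 1;

	printf("setup ok\n");
	free(b);
	free(a);
	return 0;
}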
+ * + */ + +/* + * This file contains OPA Virtual Network Interface Controller (VNIC) + * Ethernet Management Agent (EMA) driver + */ + +#include <linux/module.h> +#include <rdma/ib_addr.h> +#include <rdma/ib_verbs.h> +#include <rdma/opa_smi.h> +#include <rdma/opa_port_info.h> + +#include "opa_vnic_internal.h" + +#define DRV_VERSION "1.0" +char opa_vnic_driver_name[] = "opa_vnic"; +const char opa_vnic_driver_version[] = DRV_VERSION; + +/* + * The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd + * field in the class port info MAD. + */ +#define GET_TRAP_SL_FROM_CLASS_PORT_INFO(x) (((x) >> 3) & 0x1f) + +/* Cap trap bursts to a reasonable limit good for normal cases */ +#define OPA_VNIC_TRAP_BURST_LIMIT 4 + +/* + * VNIC trap limit timeout. + * Inverse of cap2_mask response time out (1.0737 secs) = 0.9 + * secs approx IB spec 13.4.6.2.1 PortInfoSubnetTimeout and + * 13.4.9 Traps. + */ +#define OPA_VNIC_TRAP_TIMEOUT ((4096 * (1UL << 18)) / 1000) + +#define OPA_VNIC_UNSUP_ATTR \ + cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB) + +#define OPA_VNIC_INVAL_ATTR \ + cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE) + +#define OPA_VNIC_CLASS_CAP_TRAP 0x1 + +/* Maximum number of VNIC ports supported */ +#define OPA_VNIC_MAX_NUM_VPORT 255 + +/** + * struct opa_vnic_vema_port -- VNIC VEMA port details + * @cport: pointer to port + * @mad_agent: pointer to mad agent for port + * @class_port_info: Class port info information. + * @tid: Transaction id + * @port_num: OPA port number + * @vport_idr: vnic ports idr + * @event_handler: ib event handler + * @lock: adapter interface lock + */ +struct opa_vnic_vema_port { + struct opa_vnic_ctrl_port *cport; + struct ib_mad_agent *mad_agent; + struct opa_class_port_info class_port_info; + u64 tid; + u8 port_num; + struct idr vport_idr; + struct ib_event_handler event_handler; + + /* Lock to query/update network adapter */ + struct mutex lock; +}; + +static void opa_vnic_vema_add_one(struct ib_device *device); +static void opa_vnic_vema_rem_one(struct ib_device *device, + void *client_data); + +static struct ib_client opa_vnic_client = { + .name = opa_vnic_driver_name, + .add = opa_vnic_vema_add_one, + .remove = opa_vnic_vema_rem_one, +}; + +/** + * vema_get_vport_num -- Get the vnic from the mad + * @recvd_mad: Received mad + * + * Return: returns value of the vnic port number + */ +static inline u8 vema_get_vport_num(struct opa_vnic_vema_mad *recvd_mad) +{ + return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff; +} + +/** + * vema_get_vport_adapter -- Get vnic port adapter from recvd mad + * @recvd_mad: received mad + * @port: ptr to port struct on which MAD was recvd + * + * Return: vnic adapter + */ +static inline struct opa_vnic_adapter * +vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_port *port) +{ + u8 vport_num = vema_get_vport_num(recvd_mad); + + return idr_find(&port->vport_idr, vport_num); +} + +/** + * vema_mac_tbl_req_ok -- Check if mac request has correct values + * @mac_tbl: mac table + * + * This function checks for the validity of the offset and number of + * entries required. 
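The validity check described here (and implemented just below) bounds a mactable request in two ways: the entry count must fit in one EMA MAD payload, and offset plus count must stay inside the table. A standalone sketch with hypothetical header and entry sizes standing in for the real sizeof() values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_EMA_DATA            2048   /* hypothetical MAD payload size */
#define DEMO_TBL_HDR_LEN         8      /* hypothetical mactable header size */
#define DEMO_TBL_ENTRY_LEN       16     /* hypothetical per-entry size */
#define DEMO_MAC_TBL_MAX_ENTRIES 2048   /* mirrors OPA_VNIC_MAC_TBL_MAX_ENTRIES */

static bool mac_tbl_req_ok(uint16_t offset, uint16_t num_entries)
{
	uint16_t max_per_mad = (DEMO_EMA_DATA - DEMO_TBL_HDR_LEN) /
			       DEMO_TBL_ENTRY_LEN;

	return num_entries <= max_per_mad &&
	       (uint32_t)offset + num_entries <= DEMO_MAC_TBL_MAX_ENTRIES;
}

int main(void)
{
	printf("offset 0, 100 entries: %s\n",
	       mac_tbl_req_ok(0, 100) ? "ok" : "rejected");
	printf("offset 2000, 100 entries: %s\n",
	       mac_tbl_req_ok(2000, 100) ? "ok" : "rejected");
	return 0;
}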
+ * + * Return: true if offset and num_entries are valid + */ +static inline bool vema_mac_tbl_req_ok(struct opa_veswport_mactable *mac_tbl) +{ + u16 offset, num_entries; + u16 req_entries = ((OPA_VNIC_EMA_DATA - sizeof(*mac_tbl)) / + sizeof(mac_tbl->tbl_entries[0])); + + offset = be16_to_cpu(mac_tbl->offset); + num_entries = be16_to_cpu(mac_tbl->num_entries); + + return ((num_entries <= req_entries) && + (offset + num_entries <= OPA_VNIC_MAC_TBL_MAX_ENTRIES)); +} + +/* + * Return the power on default values in the port info structure + * in big endian format as required by MAD. + */ +static inline void vema_get_pod_values(struct opa_veswport_info *port_info) +{ + memset(port_info, 0, sizeof(*port_info)); + port_info->vport.max_mac_tbl_ent = + cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES); + port_info->vport.max_smac_ent = + cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT); + port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL; + port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL; + port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN); +} + +/** + * vema_add_vport -- Add a new vnic port + * @port: ptr to opa_vnic_vema_port struct + * @vport_num: vnic port number (to be added) + * + * Return a pointer to the vnic adapter structure + */ +static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port, + u8 vport_num) +{ + struct opa_vnic_ctrl_port *cport = port->cport; + struct opa_vnic_adapter *adapter; + + adapter = opa_vnic_add_netdev(cport->ibdev, port->port_num, vport_num); + if (!IS_ERR(adapter)) { + int rc; + + adapter->cport = cport; + rc = idr_alloc(&port->vport_idr, adapter, vport_num, + vport_num + 1, GFP_NOWAIT); + if (rc < 0) { + opa_vnic_rem_netdev(adapter); + adapter = ERR_PTR(rc); + } + } + + return adapter; +} + +/** + * vema_get_class_port_info -- Get class info for port + * @port: Port on whic MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + * + * This function copies the latest class port info value set for the + * port and stores it for generating traps + */ +static void vema_get_class_port_info(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_class_port_info *port_info; + + port_info = (struct opa_class_port_info *)rsp_mad->data; + memcpy(port_info, &port->class_port_info, sizeof(*port_info)); + port_info->base_version = OPA_MGMT_BASE_VERSION, + port_info->class_version = OPA_EMA_CLASS_VERSION; + + /* + * Set capability mask bit indicating agent generates traps, + * and set the maximum number of VNIC ports supported. + */ + port_info->cap_mask = cpu_to_be16((OPA_VNIC_CLASS_CAP_TRAP | + (OPA_VNIC_MAX_NUM_VPORT << 8))); + + /* + * Since a get routine is always sent by the EM first we + * set the expected response time to + * 4.096 usec * 2^18 == 1.0737 sec here. 
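The response time programmed above is an exponent: 4.096 us x 2^18, which the comment rounds to 1.0737 seconds. A two-line standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int resp_exp = 18;     /* value placed in cap_mask2_resp_time */
	double resp_sec = 4.096e-6 * (double)(1u << resp_exp);

	printf("encoded %u -> %.4f seconds\n", resp_exp, resp_sec);
	return 0;
}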
+ */ + port_info->cap_mask2_resp_time = cpu_to_be32(18); +} + +/** + * vema_set_class_port_info -- Get class info for port + * @port: Port on whic MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + * + * This function updates the port class info for the specific vnic + * and sets up the response mad data + */ +static void vema_set_class_port_info(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + memcpy(&port->class_port_info, recvd_mad->data, + sizeof(port->class_port_info)); + + vema_get_class_port_info(port, recvd_mad, rsp_mad); +} + +/** + * vema_get_veswport_info -- Get veswport info + * @port: source port on which MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + */ +static void vema_get_veswport_info(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_veswport_info *port_info = + (struct opa_veswport_info *)rsp_mad->data; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (adapter) { + memset(port_info, 0, sizeof(*port_info)); + opa_vnic_get_vesw_info(adapter, &port_info->vesw); + opa_vnic_get_per_veswport_info(adapter, + &port_info->vport); + } else { + vema_get_pod_values(port_info); + } +} + +/** + * vema_set_veswport_info -- Set veswport info + * @port: source port on which MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + * + * This function gets the port class infor for vnic + */ +static void vema_set_veswport_info(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_vnic_ctrl_port *cport = port->cport; + struct opa_veswport_info *port_info; + struct opa_vnic_adapter *adapter; + u8 vport_num; + + vport_num = vema_get_vport_num(recvd_mad); + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (!adapter) { + adapter = vema_add_vport(port, vport_num); + if (IS_ERR(adapter)) { + c_err("failed to add vport %d: %ld\n", + vport_num, PTR_ERR(adapter)); + goto err_exit; + } + } + + port_info = (struct opa_veswport_info *)recvd_mad->data; + opa_vnic_set_vesw_info(adapter, &port_info->vesw); + opa_vnic_set_per_veswport_info(adapter, &port_info->vport); + + /* Process the new config settings */ + opa_vnic_process_vema_config(adapter); + + vema_get_veswport_info(port, recvd_mad, rsp_mad); + return; + +err_exit: + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; +} + +/** + * vema_get_mac_entries -- Get MAC entries in VNIC MAC table + * @port: source port on which MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + * + * This function gets the MAC entries that are programmed into + * the VNIC MAC forwarding table. It checks for the validity of + * the index into the MAC table and the number of entries that + * are to be retrieved. 
+ */ +static void vema_get_mac_entries(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_veswport_mactable *mac_tbl_in, *mac_tbl_out; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (!adapter) { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + return; + } + + mac_tbl_in = (struct opa_veswport_mactable *)recvd_mad->data; + mac_tbl_out = (struct opa_veswport_mactable *)rsp_mad->data; + + if (vema_mac_tbl_req_ok(mac_tbl_in)) { + mac_tbl_out->offset = mac_tbl_in->offset; + mac_tbl_out->num_entries = mac_tbl_in->num_entries; + opa_vnic_query_mac_tbl(adapter, mac_tbl_out); + } else { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + } +} + +/** + * vema_set_mac_entries -- Set MAC entries in VNIC MAC table + * @port: source port on which MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + * + * This function sets the MAC entries in the VNIC forwarding table + * It checks for the validity of the index and the number of forwarding + * table entries to be programmed. + */ +static void vema_set_mac_entries(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_veswport_mactable *mac_tbl; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (!adapter) { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + return; + } + + mac_tbl = (struct opa_veswport_mactable *)recvd_mad->data; + if (vema_mac_tbl_req_ok(mac_tbl)) { + if (opa_vnic_update_mac_tbl(adapter, mac_tbl)) + rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR; + } else { + rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR; + } + vema_get_mac_entries(port, recvd_mad, rsp_mad); +} + +/** + * vema_set_delete_vesw -- Reset VESW info to POD values + * @port: source port on which MAD was received + * @recvd_mad: pointer to the received mad + * @rsp_mad: pointer to respose mad + * + * This function clears all the fields of veswport info for the requested vesw + * and sets them back to the power-on default values. It does not delete the + * vesw. + */ +static void vema_set_delete_vesw(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_veswport_info *port_info = + (struct opa_veswport_info *)rsp_mad->data; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (!adapter) { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + return; + } + + vema_get_pod_values(port_info); + opa_vnic_set_vesw_info(adapter, &port_info->vesw); + opa_vnic_set_per_veswport_info(adapter, &port_info->vport); + + /* Process the new config settings */ + opa_vnic_process_vema_config(adapter); + + opa_vnic_release_mac_tbl(adapter); + + vema_get_veswport_info(port, recvd_mad, rsp_mad); +} + +/** + * vema_get_mac_list -- Get the unicast/multicast macs. 
+ * @port: source port on which MAD was received + * @recvd_mad: Received mad contains fields to set vnic parameters + * @rsp_mad: Response mad to be built + * @attr_id: Attribute ID indicating multicast or unicast mac list + */ +static void vema_get_mac_list(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad, + u16 attr_id) +{ + struct opa_veswport_iface_macs *macs_in, *macs_out; + int max_entries = (OPA_VNIC_EMA_DATA - sizeof(*macs_out)) / ETH_ALEN; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (!adapter) { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + return; + } + + macs_in = (struct opa_veswport_iface_macs *)recvd_mad->data; + macs_out = (struct opa_veswport_iface_macs *)rsp_mad->data; + + macs_out->start_idx = macs_in->start_idx; + if (macs_in->num_macs_in_msg) + macs_out->num_macs_in_msg = macs_in->num_macs_in_msg; + else + macs_out->num_macs_in_msg = cpu_to_be16(max_entries); + + if (attr_id == OPA_EM_ATTR_IFACE_MCAST_MACS) + opa_vnic_query_mcast_macs(adapter, macs_out); + else + opa_vnic_query_ucast_macs(adapter, macs_out); +} + +/** + * vema_get_summary_counters -- Gets summary counters. + * @port: source port on which MAD was received + * @recvd_mad: Received mad contains fields to set vnic parameters + * @rsp_mad: Response mad to be built + */ +static void vema_get_summary_counters(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_veswport_summary_counters *cntrs; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (adapter) { + cntrs = (struct opa_veswport_summary_counters *)rsp_mad->data; + opa_vnic_get_summary_counters(adapter, cntrs); + } else { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + } +} + +/** + * vema_get_error_counters -- Gets summary counters. 
+ * @port: source port on which MAD was received + * @recvd_mad: Received mad contains fields to set vnic parameters + * @rsp_mad: Response mad to be built + */ +static void vema_get_error_counters(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + struct opa_veswport_error_counters *cntrs; + struct opa_vnic_adapter *adapter; + + adapter = vema_get_vport_adapter(recvd_mad, port); + if (adapter) { + cntrs = (struct opa_veswport_error_counters *)rsp_mad->data; + opa_vnic_get_error_counters(adapter, cntrs); + } else { + rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR; + } +} + +/** + * vema_get -- Process received get MAD + * @port: source port on which MAD was received + * @recvd_mad: Received mad + * @rsp_mad: Response mad to be built + */ +static void vema_get(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id); + + switch (attr_id) { + case OPA_EM_ATTR_CLASS_PORT_INFO: + vema_get_class_port_info(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_VESWPORT_INFO: + vema_get_veswport_info(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES: + vema_get_mac_entries(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_IFACE_UCAST_MACS: + /* fall through */ + case OPA_EM_ATTR_IFACE_MCAST_MACS: + vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id); + break; + case OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS: + vema_get_summary_counters(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS: + vema_get_error_counters(port, recvd_mad, rsp_mad); + break; + default: + rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR; + break; + } +} + +/** + * vema_set -- Process received set MAD + * @port: source port on which MAD was received + * @recvd_mad: Received mad contains fields to set vnic parameters + * @rsp_mad: Response mad to be built + */ +static void vema_set(struct opa_vnic_vema_port *port, + struct opa_vnic_vema_mad *recvd_mad, + struct opa_vnic_vema_mad *rsp_mad) +{ + u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id); + + switch (attr_id) { + case OPA_EM_ATTR_CLASS_PORT_INFO: + vema_set_class_port_info(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_VESWPORT_INFO: + vema_set_veswport_info(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES: + vema_set_mac_entries(port, recvd_mad, rsp_mad); + break; + case OPA_EM_ATTR_DELETE_VESW: + vema_set_delete_vesw(port, recvd_mad, rsp_mad); + break; + default: + rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR; + break; + } +} + +/** + * vema_send -- Send handler for VEMA MAD agent + * @mad_agent: pointer to the mad agent + * @mad_wc: pointer to mad send work completion information + * + * Free all the data structures associated with the sent MAD + */ +static void vema_send(struct ib_mad_agent *mad_agent, + struct ib_mad_send_wc *mad_wc) +{ + rdma_destroy_ah(mad_wc->send_buf->ah); + ib_free_send_mad(mad_wc->send_buf); +} + +/** + * vema_recv -- Recv handler for VEMA MAD agent + * @mad_agent: pointer to the mad agent + * @send_buf: Send buffer if found, else NULL + * @mad_wc: pointer to mad send work completion information + * + * Handle only set and get methods and respond to other methods + * as unsupported. Allocate response buffer and address handle + * for the response MAD. 
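+ * The response reuses the first IB_MGMT_VENDOR_HDR bytes of the received
+ * MAD, is stamped IB_MGMT_METHOD_GET_RESP and is filled in by vema_get()
+ * or vema_set() while holding port->lock so the backing network adapter
+ * cannot be removed mid-processing. If posting the response fails, the AH
+ * and send buffer are freed here rather than in vema_send().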
+ */ +static void vema_recv(struct ib_mad_agent *mad_agent, + struct ib_mad_send_buf *send_buf, + struct ib_mad_recv_wc *mad_wc) +{ + struct opa_vnic_vema_port *port; + struct ib_ah *ah; + struct ib_mad_send_buf *rsp; + struct opa_vnic_vema_mad *vema_mad; + + if (!mad_wc || !mad_wc->recv_buf.mad) + return; + + port = mad_agent->context; + ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, + mad_wc->recv_buf.grh, mad_agent->port_num); + if (IS_ERR(ah)) + goto free_recv_mad; + + rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, + mad_wc->wc->pkey_index, 0, + IB_MGMT_VENDOR_HDR, OPA_VNIC_EMA_DATA, + GFP_KERNEL, OPA_MGMT_BASE_VERSION); + if (IS_ERR(rsp)) + goto err_rsp; + + rsp->ah = ah; + vema_mad = rsp->mad; + memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR); + vema_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP; + vema_mad->mad_hdr.status = 0; + + /* Lock ensures network adapter is not removed */ + mutex_lock(&port->lock); + + switch (mad_wc->recv_buf.mad->mad_hdr.method) { + case IB_MGMT_METHOD_GET: + vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad, + vema_mad); + break; + case IB_MGMT_METHOD_SET: + vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad, + vema_mad); + break; + default: + vema_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR; + break; + } + mutex_unlock(&port->lock); + + if (!ib_post_send_mad(rsp, NULL)) { + /* + * with post send successful ah and send mad + * will be destroyed in send handler + */ + goto free_recv_mad; + } + + ib_free_send_mad(rsp); + +err_rsp: + rdma_destroy_ah(ah); +free_recv_mad: + ib_free_recv_mad(mad_wc); +} + +/** + * vema_get_port -- Gets the opa_vnic_vema_port + * @cport: pointer to control dev + * @port_num: Port number + * + * This function loops through the ports and returns + * the opa_vnic_vema port structure that is associated + * with the OPA port number + * + * Return: ptr to requested opa_vnic_vema_port strucure + * if success, NULL if not + */ +static struct opa_vnic_vema_port * +vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num) +{ + struct opa_vnic_vema_port *port = (void *)cport + sizeof(*cport); + + if (port_num > cport->num_ports) + return NULL; + + return port + (port_num - 1); +} + +/** + * opa_vnic_vema_send_trap -- This function sends a trap to the EM + * @adapter: pointer to vnic adapter + * @data: pointer to trap data filled by calling function + * @lid: issuers lid (encap_slid from vesw_port_info) + * + * This function is called from the VNIC driver to send a trap if there + * is somethng the EM should be notified about. These events currently + * are + * 1) UNICAST INTERFACE MACADDRESS changes + * 2) MULTICAST INTERFACE MACADDRESS changes + * 3) ETHERNET LINK STATUS changes + * While allocating the send mad the remote site qpn used is 1 + * as this is the well known QP. 
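+ * Trap sends are rate limited: at most OPA_VNIC_TRAP_BURST_LIMIT traps are
+ * sent within an OPA_VNIC_TRAP_TIMEOUT window; beyond that the trap is
+ * dropped with a "Trap rate exceeded" warning until the window expires.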
+ * + */ +void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter, + struct __opa_veswport_trap *data, u32 lid) +{ + struct opa_vnic_ctrl_port *cport = adapter->cport; + struct ib_mad_send_buf *send_buf; + struct opa_vnic_vema_port *port; + struct ib_device *ibp; + struct opa_vnic_vema_mad_trap *trap_mad; + struct opa_class_port_info *class; + struct rdma_ah_attr ah_attr; + struct ib_ah *ah; + struct opa_veswport_trap *trap; + u32 trap_lid; + u16 pkey_idx; + + if (!cport) + goto err_exit; + ibp = cport->ibdev; + port = vema_get_port(cport, data->opaportnum); + if (!port || !port->mad_agent) + goto err_exit; + + if (time_before(jiffies, adapter->trap_timeout)) { + if (adapter->trap_count == OPA_VNIC_TRAP_BURST_LIMIT) { + v_warn("Trap rate exceeded\n"); + goto err_exit; + } else { + adapter->trap_count++; + } + } else { + adapter->trap_count = 0; + } + + class = &port->class_port_info; + /* Set up address handle */ + memset(&ah_attr, 0, sizeof(ah_attr)); + ah_attr.type = rdma_ah_find_type(ibp, port->port_num); + rdma_ah_set_sl(&ah_attr, + GET_TRAP_SL_FROM_CLASS_PORT_INFO(class->trap_sl_rsvd)); + rdma_ah_set_port_num(&ah_attr, port->port_num); + trap_lid = be32_to_cpu(class->trap_lid); + /* + * check for trap lid validity, must not be zero + * The trap sink could change after we fashion the MAD but since traps + * are not guaranteed we won't use a lock as anyway the change will take + * place even with locking. + */ + if (!trap_lid) { + c_err("%s: Invalid dlid\n", __func__); + goto err_exit; + } + + rdma_ah_set_dlid(&ah_attr, trap_lid); + ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr); + if (IS_ERR(ah)) { + c_err("%s:Couldn't create new AH = %p\n", __func__, ah); + c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__, + rdma_ah_get_dlid(&ah_attr), rdma_ah_get_sl(&ah_attr), + rdma_ah_get_port_num(&ah_attr)); + goto err_exit; + } + + if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_FULL, + &pkey_idx) < 0) { + c_err("%s:full key not found, defaulting to partial\n", + __func__); + if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_PARTIAL, + &pkey_idx) < 0) + pkey_idx = 1; + } + + send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0, + IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA, + GFP_ATOMIC, OPA_MGMT_BASE_VERSION); + if (IS_ERR(send_buf)) { + c_err("%s:Couldn't allocate send buf\n", __func__); + goto err_sndbuf; + } + + send_buf->ah = ah; + + /* Set up common MAD hdr */ + trap_mad = send_buf->mad; + trap_mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION; + trap_mad->mad_hdr.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA; + trap_mad->mad_hdr.class_version = OPA_EMA_CLASS_VERSION; + trap_mad->mad_hdr.method = IB_MGMT_METHOD_TRAP; + port->tid++; + trap_mad->mad_hdr.tid = cpu_to_be64(port->tid); + trap_mad->mad_hdr.attr_id = IB_SMP_ATTR_NOTICE; + + /* Set up vendor OUI */ + trap_mad->oui[0] = INTEL_OUI_1; + trap_mad->oui[1] = INTEL_OUI_2; + trap_mad->oui[2] = INTEL_OUI_3; + + /* Setup notice attribute portion */ + trap_mad->notice.gen_type = OPA_INTEL_EMA_NOTICE_TYPE_INFO << 1; + trap_mad->notice.oui_1 = INTEL_OUI_1; + trap_mad->notice.oui_2 = INTEL_OUI_2; + trap_mad->notice.oui_3 = INTEL_OUI_3; + trap_mad->notice.issuer_lid = cpu_to_be32(lid); + + /* copy the actual trap data */ + trap = (struct opa_veswport_trap *)trap_mad->notice.raw_data; + trap->fabric_id = cpu_to_be16(data->fabric_id); + trap->veswid = cpu_to_be16(data->veswid); + trap->veswportnum = cpu_to_be32(data->veswportnum); + trap->opaportnum = cpu_to_be16(data->opaportnum); + trap->veswportindex = 
data->veswportindex; + trap->opcode = data->opcode; + + /* If successful send set up rate limit timeout else bail */ + if (ib_post_send_mad(send_buf, NULL)) { + ib_free_send_mad(send_buf); + } else { + if (adapter->trap_count) + return; + adapter->trap_timeout = jiffies + + usecs_to_jiffies(OPA_VNIC_TRAP_TIMEOUT); + return; + } + +err_sndbuf: + rdma_destroy_ah(ah); +err_exit: + v_err("Aborting trap\n"); +} + +static int vema_rem_vport(int id, void *p, void *data) +{ + struct opa_vnic_adapter *adapter = p; + + opa_vnic_rem_netdev(adapter); + return 0; +} + +static int vema_enable_vport(int id, void *p, void *data) +{ + struct opa_vnic_adapter *adapter = p; + + netif_carrier_on(adapter->netdev); + return 0; +} + +static int vema_disable_vport(int id, void *p, void *data) +{ + struct opa_vnic_adapter *adapter = p; + + netif_carrier_off(adapter->netdev); + return 0; +} + +static void opa_vnic_event(struct ib_event_handler *handler, + struct ib_event *record) +{ + struct opa_vnic_vema_port *port = + container_of(handler, struct opa_vnic_vema_port, event_handler); + struct opa_vnic_ctrl_port *cport = port->cport; + + if (record->element.port_num != port->port_num) + return; + + c_dbg("OPA_VNIC received event %d on device %s port %d\n", + record->event, record->device->name, record->element.port_num); + + if (record->event == IB_EVENT_PORT_ERR) + idr_for_each(&port->vport_idr, vema_disable_vport, NULL); + if (record->event == IB_EVENT_PORT_ACTIVE) + idr_for_each(&port->vport_idr, vema_enable_vport, NULL); +} + +/** + * vema_unregister -- Unregisters agent + * @cport: pointer to control port + * + * This deletes the registration by VEMA for MADs + */ +static void vema_unregister(struct opa_vnic_ctrl_port *cport) +{ + int i; + + for (i = 1; i <= cport->num_ports; i++) { + struct opa_vnic_vema_port *port = vema_get_port(cport, i); + + if (!port->mad_agent) + continue; + + /* Lock ensures no MAD is being processed */ + mutex_lock(&port->lock); + idr_for_each(&port->vport_idr, vema_rem_vport, NULL); + mutex_unlock(&port->lock); + + ib_unregister_mad_agent(port->mad_agent); + port->mad_agent = NULL; + mutex_destroy(&port->lock); + idr_destroy(&port->vport_idr); + ib_unregister_event_handler(&port->event_handler); + } +} + +/** + * vema_register -- Registers agent + * @cport: pointer to control port + * + * This function registers the handlers for the VEMA MADs + * + * Return: returns 0 on success. 
non zero otherwise + */ +static int vema_register(struct opa_vnic_ctrl_port *cport) +{ + struct ib_mad_reg_req reg_req = { + .mgmt_class = OPA_MGMT_CLASS_INTEL_EMA, + .mgmt_class_version = OPA_MGMT_BASE_VERSION, + .oui = { INTEL_OUI_1, INTEL_OUI_2, INTEL_OUI_3 } + }; + int i; + + set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask); + set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask); + + /* register ib event handler and mad agent for each port on dev */ + for (i = 1; i <= cport->num_ports; i++) { + struct opa_vnic_vema_port *port = vema_get_port(cport, i); + int ret; + + port->cport = cport; + port->port_num = i; + + INIT_IB_EVENT_HANDLER(&port->event_handler, + cport->ibdev, opa_vnic_event); + ib_register_event_handler(&port->event_handler); + + idr_init(&port->vport_idr); + mutex_init(&port->lock); + port->mad_agent = ib_register_mad_agent(cport->ibdev, i, + IB_QPT_GSI, ®_req, + IB_MGMT_RMPP_VERSION, + vema_send, vema_recv, + port, 0); + if (IS_ERR(port->mad_agent)) { + ret = PTR_ERR(port->mad_agent); + port->mad_agent = NULL; + mutex_destroy(&port->lock); + idr_destroy(&port->vport_idr); + vema_unregister(cport); + return ret; + } + } + + return 0; +} + +/** + * opa_vnic_ctrl_config_dev -- This function sends a trap to the EM + * by way of ib_modify_port to indicate support for ethernet on the + * fabric. + * @cport: pointer to control port + * @en: enable or disable ethernet on fabric support + */ +static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en) +{ + struct ib_port_modify pm = { 0 }; + int i; + + if (en) + pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported; + else + pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported; + + for (i = 1; i <= cport->num_ports; i++) + ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm); +} + +/** + * opa_vnic_vema_add_one -- Handle new ib device + * @device: ib device pointer + * + * Allocate the vnic control port and initialize it. + */ +static void opa_vnic_vema_add_one(struct ib_device *device) +{ + struct opa_vnic_ctrl_port *cport; + int rc, size = sizeof(*cport); + + if (!rdma_cap_opa_vnic(device)) + return; + + size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port); + cport = kzalloc(size, GFP_KERNEL); + if (!cport) + return; + + cport->num_ports = device->phys_port_cnt; + cport->ibdev = device; + + /* Initialize opa vnic management agent (vema) */ + rc = vema_register(cport); + if (!rc) + c_info("VNIC client initialized\n"); + + ib_set_client_data(device, &opa_vnic_client, cport); + opa_vnic_ctrl_config_dev(cport, true); +} + +/** + * opa_vnic_vema_rem_one -- Handle ib device removal + * @device: ib device pointer + * @client_data: ib client data + * + * Uninitialize and free the vnic control port. 
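+ * The IsEthOnFabricSupported capability bit is cleared first, then
+ * vema_unregister() tears down the per-port MAD agents (removing any
+ * remaining vports under the port lock) before the control port memory
+ * is freed.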
+ */ +static void opa_vnic_vema_rem_one(struct ib_device *device, + void *client_data) +{ + struct opa_vnic_ctrl_port *cport = client_data; + + if (!cport) + return; + + c_info("removing VNIC client\n"); + opa_vnic_ctrl_config_dev(cport, false); + vema_unregister(cport); + kfree(cport); +} + +static int __init opa_vnic_init(void) +{ + int rc; + + pr_info("OPA Virtual Network Driver - v%s\n", + opa_vnic_driver_version); + + rc = ib_register_client(&opa_vnic_client); + if (rc) + pr_err("VNIC driver register failed %d\n", rc); + + return rc; +} +module_init(opa_vnic_init); + +static void opa_vnic_deinit(void) +{ + ib_unregister_client(&opa_vnic_client); +} +module_exit(opa_vnic_deinit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel OPA Virtual Network driver"); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c new file mode 100644 index 000000000..868b5aec1 --- /dev/null +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c @@ -0,0 +1,390 @@ +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * This file contains OPA VNIC EMA Interface functions. 
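+ * These are the helpers the VEMA MAD handlers call into: trap reporting,
+ * summary and error counter queries, vesw and per-veswport info get/set,
+ * and unicast/multicast MAC list queries.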
+ */ + +#include "opa_vnic_internal.h" + +/** + * opa_vnic_vema_report_event - sent trap to report the specified event + * @adapter: vnic port adapter + * @event: event to be reported + * + * This function calls vema api to sent a trap for the given event. + */ +void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event) +{ + struct __opa_veswport_info *info = &adapter->info; + struct __opa_veswport_trap trap_data; + + trap_data.fabric_id = info->vesw.fabric_id; + trap_data.veswid = info->vesw.vesw_id; + trap_data.veswportnum = info->vport.port_num; + trap_data.opaportnum = adapter->port_num; + trap_data.veswportindex = adapter->vport_num; + trap_data.opcode = event; + + opa_vnic_vema_send_trap(adapter, &trap_data, info->vport.encap_slid); +} + +/** + * opa_vnic_get_error_counters - get summary counters + * @adapter: vnic port adapter + * @cntrs: pointer to destination summary counters structure + * + * This function populates the summary counters that is maintained by the + * given adapter to destination address provided. + */ +void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter, + struct opa_veswport_summary_counters *cntrs) +{ + struct opa_vnic_stats vstats; + __be64 *dst; + u64 *src; + + memset(&vstats, 0, sizeof(vstats)); + spin_lock(&adapter->stats_lock); + adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats); + spin_unlock(&adapter->stats_lock); + + cntrs->vp_instance = cpu_to_be16(adapter->vport_num); + cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id); + cntrs->veswport_num = cpu_to_be32(adapter->port_num); + + cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors); + cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors); + cntrs->tx_packets = cpu_to_be64(vstats.netstats.tx_packets); + cntrs->rx_packets = cpu_to_be64(vstats.netstats.rx_packets); + cntrs->tx_bytes = cpu_to_be64(vstats.netstats.tx_bytes); + cntrs->rx_bytes = cpu_to_be64(vstats.netstats.rx_bytes); + + /* + * This loop depends on layout of + * opa_veswport_summary_counters opa_vnic_stats structures. + */ + for (dst = &cntrs->tx_unicast, src = &vstats.tx_grp.unicast; + dst < &cntrs->reserved[0]; dst++, src++) { + *dst = cpu_to_be64(*src); + } +} + +/** + * opa_vnic_get_error_counters - get error counters + * @adapter: vnic port adapter + * @cntrs: pointer to destination error counters structure + * + * This function populates the error counters that is maintained by the + * given adapter to destination address provided. 
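+ * Most fields are taken from the driver maintained opa_vnic_stats;
+ * tx_logic aggregates the netdev tx_fifo_errors and tx_carrier_errors,
+ * rx_bad_veswid is reported from rx_nohandler and rx_logic from
+ * rx_fifo_errors.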
+ */ +void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter, + struct opa_veswport_error_counters *cntrs) +{ + struct opa_vnic_stats vstats; + + memset(&vstats, 0, sizeof(vstats)); + spin_lock(&adapter->stats_lock); + adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats); + spin_unlock(&adapter->stats_lock); + + cntrs->vp_instance = cpu_to_be16(adapter->vport_num); + cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id); + cntrs->veswport_num = cpu_to_be32(adapter->port_num); + + cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors); + cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors); + cntrs->tx_dlid_zero = cpu_to_be64(vstats.tx_dlid_zero); + cntrs->tx_drop_state = cpu_to_be64(vstats.tx_drop_state); + cntrs->tx_logic = cpu_to_be64(vstats.netstats.tx_fifo_errors + + vstats.netstats.tx_carrier_errors); + + cntrs->rx_bad_veswid = cpu_to_be64(vstats.netstats.rx_nohandler); + cntrs->rx_runt = cpu_to_be64(vstats.rx_runt); + cntrs->rx_oversize = cpu_to_be64(vstats.rx_oversize); + cntrs->rx_drop_state = cpu_to_be64(vstats.rx_drop_state); + cntrs->rx_logic = cpu_to_be64(vstats.netstats.rx_fifo_errors); +} + +/** + * opa_vnic_get_vesw_info -- Get the vesw information + * @adapter: vnic port adapter + * @info: pointer to destination vesw info structure + * + * This function copies the vesw info that is maintained by the + * given adapter to destination address provided. + */ +void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter, + struct opa_vesw_info *info) +{ + struct __opa_vesw_info *src = &adapter->info.vesw; + int i; + + info->fabric_id = cpu_to_be16(src->fabric_id); + info->vesw_id = cpu_to_be16(src->vesw_id); + memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0)); + info->def_port_mask = cpu_to_be16(src->def_port_mask); + memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1)); + info->pkey = cpu_to_be16(src->pkey); + + memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2)); + info->u_mcast_dlid = cpu_to_be32(src->u_mcast_dlid); + for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) + info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]); + + info->rc = cpu_to_be32(src->rc); + + memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3)); + info->eth_mtu = cpu_to_be16(src->eth_mtu); + memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4)); +} + +/** + * opa_vnic_set_vesw_info -- Set the vesw information + * @adapter: vnic port adapter + * @info: pointer to vesw info structure + * + * This function updates the vesw info that is maintained by the + * given adapter with vesw info provided. Reserved fields are stored + * and returned back to EM as is. 
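+ * All multi-byte fields arrive in network byte order and are converted to
+ * host order here; opa_vnic_get_vesw_info() performs the mirror cpu_to_be
+ * conversion when the state is reported back to the EM.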
+ */ +void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter, + struct opa_vesw_info *info) +{ + struct __opa_vesw_info *dst = &adapter->info.vesw; + int i; + + dst->fabric_id = be16_to_cpu(info->fabric_id); + dst->vesw_id = be16_to_cpu(info->vesw_id); + memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0)); + dst->def_port_mask = be16_to_cpu(info->def_port_mask); + memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1)); + dst->pkey = be16_to_cpu(info->pkey); + + memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2)); + dst->u_mcast_dlid = be32_to_cpu(info->u_mcast_dlid); + for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) + dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]); + + dst->rc = be32_to_cpu(info->rc); + + memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3)); + dst->eth_mtu = be16_to_cpu(info->eth_mtu); + memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4)); +} + +/** + * opa_vnic_get_per_veswport_info -- Get the vesw per port information + * @adapter: vnic port adapter + * @info: pointer to destination vport info structure + * + * This function copies the vesw per port info that is maintained by the + * given adapter to destination address provided. + * Note that the read only fields are not copied. + */ +void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter, + struct opa_per_veswport_info *info) +{ + struct __opa_per_veswport_info *src = &adapter->info.vport; + + info->port_num = cpu_to_be32(src->port_num); + info->eth_link_status = src->eth_link_status; + memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0)); + + memcpy(info->base_mac_addr, src->base_mac_addr, + ARRAY_SIZE(info->base_mac_addr)); + info->config_state = src->config_state; + info->oper_state = src->oper_state; + info->max_mac_tbl_ent = cpu_to_be16(src->max_mac_tbl_ent); + info->max_smac_ent = cpu_to_be16(src->max_smac_ent); + info->mac_tbl_digest = cpu_to_be32(src->mac_tbl_digest); + memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1)); + + info->encap_slid = cpu_to_be32(src->encap_slid); + memcpy(info->pcp_to_sc_uc, src->pcp_to_sc_uc, + ARRAY_SIZE(info->pcp_to_sc_uc)); + memcpy(info->pcp_to_vl_uc, src->pcp_to_vl_uc, + ARRAY_SIZE(info->pcp_to_vl_uc)); + memcpy(info->pcp_to_sc_mc, src->pcp_to_sc_mc, + ARRAY_SIZE(info->pcp_to_sc_mc)); + memcpy(info->pcp_to_vl_mc, src->pcp_to_vl_mc, + ARRAY_SIZE(info->pcp_to_vl_mc)); + info->non_vlan_sc_uc = src->non_vlan_sc_uc; + info->non_vlan_vl_uc = src->non_vlan_vl_uc; + info->non_vlan_sc_mc = src->non_vlan_sc_mc; + info->non_vlan_vl_mc = src->non_vlan_vl_mc; + memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2)); + + info->uc_macs_gen_count = cpu_to_be16(src->uc_macs_gen_count); + info->mc_macs_gen_count = cpu_to_be16(src->mc_macs_gen_count); + memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3)); +} + +/** + * opa_vnic_set_per_veswport_info -- Set vesw per port information + * @adapter: vnic port adapter + * @info: pointer to vport info structure + * + * This function updates the vesw per port info that is maintained by the + * given adapter with vesw per port info provided. Reserved fields are + * stored and returned back to EM as is. 
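+ * Driver owned (read only) fields such as eth_link_status, oper_state,
+ * the MAC table size limits, mac_tbl_digest and the MAC generation counts
+ * are not taken from the EM here.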
+ */ +void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter, + struct opa_per_veswport_info *info) +{ + struct __opa_per_veswport_info *dst = &adapter->info.vport; + + dst->port_num = be32_to_cpu(info->port_num); + memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0)); + + memcpy(dst->base_mac_addr, info->base_mac_addr, + ARRAY_SIZE(dst->base_mac_addr)); + dst->config_state = info->config_state; + memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1)); + + dst->encap_slid = be32_to_cpu(info->encap_slid); + memcpy(dst->pcp_to_sc_uc, info->pcp_to_sc_uc, + ARRAY_SIZE(dst->pcp_to_sc_uc)); + memcpy(dst->pcp_to_vl_uc, info->pcp_to_vl_uc, + ARRAY_SIZE(dst->pcp_to_vl_uc)); + memcpy(dst->pcp_to_sc_mc, info->pcp_to_sc_mc, + ARRAY_SIZE(dst->pcp_to_sc_mc)); + memcpy(dst->pcp_to_vl_mc, info->pcp_to_vl_mc, + ARRAY_SIZE(dst->pcp_to_vl_mc)); + dst->non_vlan_sc_uc = info->non_vlan_sc_uc; + dst->non_vlan_vl_uc = info->non_vlan_vl_uc; + dst->non_vlan_sc_mc = info->non_vlan_sc_mc; + dst->non_vlan_vl_mc = info->non_vlan_vl_mc; + memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2)); + memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3)); +} + +/** + * opa_vnic_query_mcast_macs - query multicast mac list + * @adapter: vnic port adapter + * @macs: pointer mac list + * + * This function populates the provided mac list with the configured + * multicast addresses in the adapter. + */ +void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter, + struct opa_veswport_iface_macs *macs) +{ + u16 start_idx, num_macs, idx = 0, count = 0; + struct netdev_hw_addr *ha; + + start_idx = be16_to_cpu(macs->start_idx); + num_macs = be16_to_cpu(macs->num_macs_in_msg); + netdev_for_each_mc_addr(ha, adapter->netdev) { + struct opa_vnic_iface_mac_entry *entry = &macs->entry[count]; + + if (start_idx > idx++) + continue; + else if (num_macs == count) + break; + memcpy(entry, ha->addr, sizeof(*entry)); + count++; + } + + macs->tot_macs_in_lst = cpu_to_be16(netdev_mc_count(adapter->netdev)); + macs->num_macs_in_msg = cpu_to_be16(count); + macs->gen_count = cpu_to_be16(adapter->info.vport.mc_macs_gen_count); +} + +/** + * opa_vnic_query_ucast_macs - query unicast mac list + * @adapter: vnic port adapter + * @macs: pointer mac list + * + * This function populates the provided mac list with the configured + * unicast addresses in the adapter. 
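+ * The EM-programmed base MAC address is skipped while walking the netdev
+ * dev_addrs list and tot_macs_in_lst is reduced accordingly, so the EM
+ * only sees the locally added unicast addresses; paging works the same
+ * way as for the multicast list above.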
+ */ +void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, + struct opa_veswport_iface_macs *macs) +{ + u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0; + struct netdev_hw_addr *ha; + + start_idx = be16_to_cpu(macs->start_idx); + num_macs = be16_to_cpu(macs->num_macs_in_msg); + /* loop through dev_addrs list first */ + for_each_dev_addr(adapter->netdev, ha) { + struct opa_vnic_iface_mac_entry *entry = &macs->entry[count]; + + /* Do not include EM specified MAC address */ + if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr, + ARRAY_SIZE(adapter->info.vport.base_mac_addr))) { + em_macs++; + continue; + } + + if (start_idx > idx++) + continue; + else if (num_macs == count) + break; + memcpy(entry, ha->addr, sizeof(*entry)); + count++; + } + + /* loop through uc list */ + netdev_for_each_uc_addr(ha, adapter->netdev) { + struct opa_vnic_iface_mac_entry *entry = &macs->entry[count]; + + if (start_idx > idx++) + continue; + else if (num_macs == count) + break; + memcpy(entry, ha->addr, sizeof(*entry)); + count++; + } + + tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) + + netdev_uc_count(adapter->netdev) - em_macs; + macs->tot_macs_in_lst = cpu_to_be16(tot_macs); + macs->num_macs_in_msg = cpu_to_be16(count); + macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count); +} diff --git a/drivers/infiniband/ulp/srp/Kbuild b/drivers/infiniband/ulp/srp/Kbuild new file mode 100644 index 000000000..a16c73c66 --- /dev/null +++ b/drivers/infiniband/ulp/srp/Kbuild @@ -0,0 +1 @@ +obj-$(CONFIG_INFINIBAND_SRP) += ib_srp.o diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig new file mode 100644 index 000000000..99db8fe51 --- /dev/null +++ b/drivers/infiniband/ulp/srp/Kconfig @@ -0,0 +1,12 @@ +config INFINIBAND_SRP + tristate "InfiniBand SCSI RDMA Protocol" + depends on SCSI && INFINIBAND_ADDR_TRANS + select SCSI_SRP_ATTRS + ---help--- + Support for the SCSI RDMA Protocol over InfiniBand. This + allows you to access storage devices that speak SRP over + InfiniBand. + + The SRP protocol is defined by the INCITS T10 technical + committee. See <http://www.t10.org/>. + diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c new file mode 100644 index 000000000..6dcdc42ed --- /dev/null +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -0,0 +1,4266 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/string.h> +#include <linux/parser.h> +#include <linux/random.h> +#include <linux/jiffies.h> +#include <linux/lockdep.h> +#include <linux/inet.h> +#include <rdma/ib_cache.h> + +#include <linux/atomic.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_dbg.h> +#include <scsi/scsi_tcq.h> +#include <scsi/srp.h> +#include <scsi/scsi_transport_srp.h> + +#include "ib_srp.h" + +#define DRV_NAME "ib_srp" +#define PFX DRV_NAME ": " + +MODULE_AUTHOR("Roland Dreier"); +MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); +MODULE_LICENSE("Dual BSD/GPL"); + +#if !defined(CONFIG_DYNAMIC_DEBUG) +#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) +#define DYNAMIC_DEBUG_BRANCH(descriptor) false +#endif + +static unsigned int srp_sg_tablesize; +static unsigned int cmd_sg_entries; +static unsigned int indirect_sg_entries; +static bool allow_ext_sg; +static bool prefer_fr = true; +static bool register_always = true; +static bool never_register; +static int topspin_workarounds = 1; + +module_param(srp_sg_tablesize, uint, 0444); +MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries"); + +module_param(cmd_sg_entries, uint, 0444); +MODULE_PARM_DESC(cmd_sg_entries, + "Default number of gather/scatter entries in the SRP command (default is 12, max 255)"); + +module_param(indirect_sg_entries, uint, 0444); +MODULE_PARM_DESC(indirect_sg_entries, + "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); + +module_param(allow_ext_sg, bool, 0444); +MODULE_PARM_DESC(allow_ext_sg, + "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)"); + +module_param(topspin_workarounds, int, 0444); +MODULE_PARM_DESC(topspin_workarounds, + "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); + +module_param(prefer_fr, bool, 0444); +MODULE_PARM_DESC(prefer_fr, +"Whether to use fast registration if both FMR and fast registration are supported"); + +module_param(register_always, bool, 0444); +MODULE_PARM_DESC(register_always, + "Use memory registration even for contiguous memory regions"); + +module_param(never_register, bool, 0444); +MODULE_PARM_DESC(never_register, "Never register memory"); + +static const struct kernel_param_ops srp_tmo_ops; + +static int srp_reconnect_delay = 10; +module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts"); + +static int srp_fast_io_fail_tmo = 15; +module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_io_fail_tmo, + "Number of seconds between the observation of a transport" + " layer error and failing all I/O. 
\"off\" means that this" + " functionality is disabled."); + +static int srp_dev_loss_tmo = 600; +module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo, + S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dev_loss_tmo, + "Maximum number of seconds that the SRP transport should" + " insulate transport layer errors. After this time has been" + " exceeded the SCSI host is removed. Should be" + " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + " if fast_io_fail_tmo has not been set. \"off\" means that" + " this functionality is disabled."); + +static unsigned ch_count; +module_param(ch_count, uint, 0444); +MODULE_PARM_DESC(ch_count, + "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); + +static void srp_add_one(struct ib_device *device); +static void srp_remove_one(struct ib_device *device, void *client_data); +static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc); +static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc, + const char *opname); +static int srp_ib_cm_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event); +static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event); + +static struct scsi_transport_template *ib_srp_transport_template; +static struct workqueue_struct *srp_remove_wq; + +static struct ib_client srp_client = { + .name = "srp", + .add = srp_add_one, + .remove = srp_remove_one +}; + +static struct ib_sa_client srp_sa_client; + +static int srp_tmo_get(char *buffer, const struct kernel_param *kp) +{ + int tmo = *(int *)kp->arg; + + if (tmo >= 0) + return sprintf(buffer, "%d", tmo); + else + return sprintf(buffer, "off"); +} + +static int srp_tmo_set(const char *val, const struct kernel_param *kp) +{ + int tmo, res; + + res = srp_parse_tmo(&tmo, val); + if (res) + goto out; + + if (kp->arg == &srp_reconnect_delay) + res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, + srp_dev_loss_tmo); + else if (kp->arg == &srp_fast_io_fail_tmo) + res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo); + else + res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo, + tmo); + if (res) + goto out; + *(int *)kp->arg = tmo; + +out: + return res; +} + +static const struct kernel_param_ops srp_tmo_ops = { + .get = srp_tmo_get, + .set = srp_tmo_set, +}; + +static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) +{ + return (struct srp_target_port *) host->hostdata; +} + +static const char *srp_target_info(struct Scsi_Host *host) +{ + return host_to_target(host)->target_name; +} + +static int srp_target_is_topspin(struct srp_target_port *target) +{ + static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; + static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d }; + + return topspin_workarounds && + (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || + !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); +} + +static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, + gfp_t gfp_mask, + enum dma_data_direction direction) +{ + struct srp_iu *iu; + + iu = kmalloc(sizeof *iu, gfp_mask); + if (!iu) + goto out; + + iu->buf = kzalloc(size, gfp_mask); + if (!iu->buf) + goto out_free_iu; + + iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, + direction); + if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) + 
goto out_free_buf; + + iu->size = size; + iu->direction = direction; + + return iu; + +out_free_buf: + kfree(iu->buf); +out_free_iu: + kfree(iu); +out: + return NULL; +} + +static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) +{ + if (!iu) + return; + + ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, + iu->direction); + kfree(iu->buf); + kfree(iu); +} + +static void srp_qp_event(struct ib_event *event, void *context) +{ + pr_debug("QP event %s (%d)\n", + ib_event_msg(event->event), event->event); +} + +static int srp_init_ib_qp(struct srp_target_port *target, + struct ib_qp *qp) +{ + struct ib_qp_attr *attr; + int ret; + + attr = kmalloc(sizeof *attr, GFP_KERNEL); + if (!attr) + return -ENOMEM; + + ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, + target->srp_host->port, + be16_to_cpu(target->ib_cm.pkey), + &attr->pkey_index); + if (ret) + goto out; + + attr->qp_state = IB_QPS_INIT; + attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE); + attr->port_num = target->srp_host->port; + + ret = ib_modify_qp(qp, attr, + IB_QP_STATE | + IB_QP_PKEY_INDEX | + IB_QP_ACCESS_FLAGS | + IB_QP_PORT); + +out: + kfree(attr); + return ret; +} + +static int srp_new_ib_cm_id(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct ib_cm_id *new_cm_id; + + new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, + srp_ib_cm_handler, ch); + if (IS_ERR(new_cm_id)) + return PTR_ERR(new_cm_id); + + if (ch->ib_cm.cm_id) + ib_destroy_cm_id(ch->ib_cm.cm_id); + ch->ib_cm.cm_id = new_cm_id; + if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev, + target->srp_host->port)) + ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA; + else + ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB; + ch->ib_cm.path.sgid = target->sgid; + ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; + ch->ib_cm.path.pkey = target->ib_cm.pkey; + ch->ib_cm.path.service_id = target->ib_cm.service_id; + + return 0; +} + +static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct rdma_cm_id *new_cm_id; + int ret; + + new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(new_cm_id)) { + ret = PTR_ERR(new_cm_id); + new_cm_id = NULL; + goto out; + } + + init_completion(&ch->done); + ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ? + (struct sockaddr *)&target->rdma_cm.src : NULL, + (struct sockaddr *)&target->rdma_cm.dst, + SRP_PATH_REC_TIMEOUT_MS); + if (ret) { + pr_err("No route available from %pIS to %pIS (%d)\n", + &target->rdma_cm.src, &target->rdma_cm.dst, ret); + goto out; + } + ret = wait_for_completion_interruptible(&ch->done); + if (ret < 0) + goto out; + + ret = ch->status; + if (ret) { + pr_err("Resolving address %pIS failed (%d)\n", + &target->rdma_cm.dst, ret); + goto out; + } + + swap(ch->rdma_cm.cm_id, new_cm_id); + +out: + if (new_cm_id) + rdma_destroy_id(new_cm_id); + + return ret; +} + +static int srp_new_cm_id(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + + return target->using_rdma_cm ? 
srp_new_rdma_cm_id(ch) : + srp_new_ib_cm_id(ch); +} + +static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target) +{ + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_fmr_pool_param fmr_param; + + memset(&fmr_param, 0, sizeof(fmr_param)); + fmr_param.pool_size = target->mr_pool_size; + fmr_param.dirty_watermark = fmr_param.pool_size / 4; + fmr_param.cache = 1; + fmr_param.max_pages_per_fmr = dev->max_pages_per_mr; + fmr_param.page_shift = ilog2(dev->mr_page_size); + fmr_param.access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); + + return ib_create_fmr_pool(dev->pd, &fmr_param); +} + +/** + * srp_destroy_fr_pool() - free the resources owned by a pool + * @pool: Fast registration pool to be destroyed. + */ +static void srp_destroy_fr_pool(struct srp_fr_pool *pool) +{ + int i; + struct srp_fr_desc *d; + + if (!pool) + return; + + for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { + if (d->mr) + ib_dereg_mr(d->mr); + } + kfree(pool); +} + +/** + * srp_create_fr_pool() - allocate and initialize a pool for fast registration + * @device: IB device to allocate fast registration descriptors for. + * @pd: Protection domain associated with the FR descriptors. + * @pool_size: Number of descriptors to allocate. + * @max_page_list_len: Maximum fast registration work request page list length. + */ +static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, + struct ib_pd *pd, int pool_size, + int max_page_list_len) +{ + struct srp_fr_pool *pool; + struct srp_fr_desc *d; + struct ib_mr *mr; + int i, ret = -EINVAL; + enum ib_mr_type mr_type; + + if (pool_size <= 0) + goto err; + ret = -ENOMEM; + pool = kzalloc(sizeof(struct srp_fr_pool) + + pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL); + if (!pool) + goto err; + pool->size = pool_size; + pool->max_page_list_len = max_page_list_len; + spin_lock_init(&pool->lock); + INIT_LIST_HEAD(&pool->free_list); + + if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) + mr_type = IB_MR_TYPE_SG_GAPS; + else + mr_type = IB_MR_TYPE_MEM_REG; + + for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { + mr = ib_alloc_mr(pd, mr_type, max_page_list_len); + if (IS_ERR(mr)) { + ret = PTR_ERR(mr); + if (ret == -ENOMEM) + pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n", + dev_name(&device->dev)); + goto destroy_pool; + } + d->mr = mr; + list_add_tail(&d->entry, &pool->free_list); + } + +out: + return pool; + +destroy_pool: + srp_destroy_fr_pool(pool); + +err: + pool = ERR_PTR(ret); + goto out; +} + +/** + * srp_fr_pool_get() - obtain a descriptor suitable for fast registration + * @pool: Pool to obtain descriptor from. + */ +static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool) +{ + struct srp_fr_desc *d = NULL; + unsigned long flags; + + spin_lock_irqsave(&pool->lock, flags); + if (!list_empty(&pool->free_list)) { + d = list_first_entry(&pool->free_list, typeof(*d), entry); + list_del(&d->entry); + } + spin_unlock_irqrestore(&pool->lock, flags); + + return d; +} + +/** + * srp_fr_pool_put() - put an FR descriptor back in the free list + * @pool: Pool the descriptor was allocated from. + * @desc: Pointer to an array of fast registration descriptor pointers. + * @n: Number of descriptors to put back. + * + * Note: The caller must already have queued an invalidation request for + * desc->mr->rkey before calling this function. 
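+ * The descriptors are simply linked back onto the pool's free list under
+ * pool->lock, ready to be handed out again by srp_fr_pool_get().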
+ */ +static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc, + int n) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&pool->lock, flags); + for (i = 0; i < n; i++) + list_add(&desc[i]->entry, &pool->free_list); + spin_unlock_irqrestore(&pool->lock, flags); +} + +static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) +{ + struct srp_device *dev = target->srp_host->srp_dev; + + return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size, + dev->max_pages_per_mr); +} + +/** + * srp_destroy_qp() - destroy an RDMA queue pair + * @ch: SRP RDMA channel. + * + * Drain the qp before destroying it. This avoids that the receive + * completion handler can access the queue pair while it is + * being destroyed. + */ +static void srp_destroy_qp(struct srp_rdma_ch *ch) +{ + spin_lock_irq(&ch->lock); + ib_process_cq_direct(ch->send_cq, -1); + spin_unlock_irq(&ch->lock); + + ib_drain_qp(ch->qp); + ib_destroy_qp(ch->qp); +} + +static int srp_create_ch_ib(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_qp_init_attr *init_attr; + struct ib_cq *recv_cq, *send_cq; + struct ib_qp *qp; + struct ib_fmr_pool *fmr_pool = NULL; + struct srp_fr_pool *fr_pool = NULL; + const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2; + int ret; + + init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); + if (!init_attr) + return -ENOMEM; + + /* queue_size + 1 for ib_drain_rq() */ + recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, + ch->comp_vector, IB_POLL_SOFTIRQ); + if (IS_ERR(recv_cq)) { + ret = PTR_ERR(recv_cq); + goto err; + } + + send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, + ch->comp_vector, IB_POLL_DIRECT); + if (IS_ERR(send_cq)) { + ret = PTR_ERR(send_cq); + goto err_recv_cq; + } + + init_attr->event_handler = srp_qp_event; + init_attr->cap.max_send_wr = m * target->queue_size; + init_attr->cap.max_recv_wr = target->queue_size + 1; + init_attr->cap.max_recv_sge = 1; + init_attr->cap.max_send_sge = 1; + init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; + init_attr->qp_type = IB_QPT_RC; + init_attr->send_cq = send_cq; + init_attr->recv_cq = recv_cq; + + if (target->using_rdma_cm) { + ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr); + qp = ch->rdma_cm.cm_id->qp; + } else { + qp = ib_create_qp(dev->pd, init_attr); + if (!IS_ERR(qp)) { + ret = srp_init_ib_qp(target, qp); + if (ret) + ib_destroy_qp(qp); + } else { + ret = PTR_ERR(qp); + } + } + if (ret) { + pr_err("QP creation failed for dev %s: %d\n", + dev_name(&dev->dev->dev), ret); + goto err_send_cq; + } + + if (dev->use_fast_reg) { + fr_pool = srp_alloc_fr_pool(target); + if (IS_ERR(fr_pool)) { + ret = PTR_ERR(fr_pool); + shost_printk(KERN_WARNING, target->scsi_host, PFX + "FR pool allocation failed (%d)\n", ret); + goto err_qp; + } + } else if (dev->use_fmr) { + fmr_pool = srp_alloc_fmr_pool(target); + if (IS_ERR(fmr_pool)) { + ret = PTR_ERR(fmr_pool); + shost_printk(KERN_WARNING, target->scsi_host, PFX + "FMR pool allocation failed (%d)\n", ret); + goto err_qp; + } + } + + if (ch->qp) + srp_destroy_qp(ch); + if (ch->recv_cq) + ib_free_cq(ch->recv_cq); + if (ch->send_cq) + ib_free_cq(ch->send_cq); + + ch->qp = qp; + ch->recv_cq = recv_cq; + ch->send_cq = send_cq; + + if (dev->use_fast_reg) { + if (ch->fr_pool) + srp_destroy_fr_pool(ch->fr_pool); + ch->fr_pool = fr_pool; + } else if (dev->use_fmr) { + if (ch->fmr_pool) + ib_destroy_fmr_pool(ch->fmr_pool); + ch->fmr_pool = fmr_pool; + 
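+		/*
+		 * The old FMR pool (if any) was destroyed above; ch now owns
+		 * the newly allocated pool together with the new QP and CQs.
+		 */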
} + + kfree(init_attr); + return 0; + +err_qp: + if (target->using_rdma_cm) + rdma_destroy_qp(ch->rdma_cm.cm_id); + else + ib_destroy_qp(qp); + +err_send_cq: + ib_free_cq(send_cq); + +err_recv_cq: + ib_free_cq(recv_cq); + +err: + kfree(init_attr); + return ret; +} + +/* + * Note: this function may be called without srp_alloc_iu_bufs() having been + * invoked. Hence the ch->[rt]x_ring checks. + */ +static void srp_free_ch_ib(struct srp_target_port *target, + struct srp_rdma_ch *ch) +{ + struct srp_device *dev = target->srp_host->srp_dev; + int i; + + if (!ch->target) + return; + + if (target->using_rdma_cm) { + if (ch->rdma_cm.cm_id) { + rdma_destroy_id(ch->rdma_cm.cm_id); + ch->rdma_cm.cm_id = NULL; + } + } else { + if (ch->ib_cm.cm_id) { + ib_destroy_cm_id(ch->ib_cm.cm_id); + ch->ib_cm.cm_id = NULL; + } + } + + /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */ + if (!ch->qp) + return; + + if (dev->use_fast_reg) { + if (ch->fr_pool) + srp_destroy_fr_pool(ch->fr_pool); + } else if (dev->use_fmr) { + if (ch->fmr_pool) + ib_destroy_fmr_pool(ch->fmr_pool); + } + + srp_destroy_qp(ch); + ib_free_cq(ch->send_cq); + ib_free_cq(ch->recv_cq); + + /* + * Avoid that the SCSI error handler tries to use this channel after + * it has been freed. The SCSI error handler can namely continue + * trying to perform recovery actions after scsi_remove_host() + * returned. + */ + ch->target = NULL; + + ch->qp = NULL; + ch->send_cq = ch->recv_cq = NULL; + + if (ch->rx_ring) { + for (i = 0; i < target->queue_size; ++i) + srp_free_iu(target->srp_host, ch->rx_ring[i]); + kfree(ch->rx_ring); + ch->rx_ring = NULL; + } + if (ch->tx_ring) { + for (i = 0; i < target->queue_size; ++i) + srp_free_iu(target->srp_host, ch->tx_ring[i]); + kfree(ch->tx_ring); + ch->tx_ring = NULL; + } +} + +static void srp_path_rec_completion(int status, + struct sa_path_rec *pathrec, + void *ch_ptr) +{ + struct srp_rdma_ch *ch = ch_ptr; + struct srp_target_port *target = ch->target; + + ch->status = status; + if (status) + shost_printk(KERN_ERR, target->scsi_host, + PFX "Got failed path rec status %d\n", status); + else + ch->ib_cm.path = *pathrec; + complete(&ch->done); +} + +static int srp_ib_lookup_path(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + int ret; + + ch->ib_cm.path.numb_path = 1; + + init_completion(&ch->done); + + ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, + target->srp_host->srp_dev->dev, + target->srp_host->port, + &ch->ib_cm.path, + IB_SA_PATH_REC_SERVICE_ID | + IB_SA_PATH_REC_DGID | + IB_SA_PATH_REC_SGID | + IB_SA_PATH_REC_NUMB_PATH | + IB_SA_PATH_REC_PKEY, + SRP_PATH_REC_TIMEOUT_MS, + GFP_KERNEL, + srp_path_rec_completion, + ch, &ch->ib_cm.path_query); + if (ch->ib_cm.path_query_id < 0) + return ch->ib_cm.path_query_id; + + ret = wait_for_completion_interruptible(&ch->done); + if (ret < 0) + return ret; + + if (ch->status < 0) + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n", + ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, + be16_to_cpu(target->ib_cm.pkey), + be64_to_cpu(target->ib_cm.service_id)); + + return ch->status; +} + +static int srp_rdma_lookup_path(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + int ret; + + init_completion(&ch->done); + + ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); + if (ret) + return ret; + + wait_for_completion_interruptible(&ch->done); + + if (ch->status != 0) + 
shost_printk(KERN_WARNING, target->scsi_host, + PFX "Path resolution failed\n"); + + return ch->status; +} + +static int srp_lookup_path(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + + return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : + srp_ib_lookup_path(ch); +} + +static u8 srp_get_subnet_timeout(struct srp_host *host) +{ + struct ib_port_attr attr; + int ret; + u8 subnet_timeout = 18; + + ret = ib_query_port(host->srp_dev->dev, host->port, &attr); + if (ret == 0) + subnet_timeout = attr.subnet_timeout; + + if (unlikely(subnet_timeout < 15)) + pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n", + dev_name(&host->srp_dev->dev->dev), subnet_timeout); + + return subnet_timeout; +} + +static int srp_send_req(struct srp_rdma_ch *ch, bool multich) +{ + struct srp_target_port *target = ch->target; + struct { + struct rdma_conn_param rdma_param; + struct srp_login_req_rdma rdma_req; + struct ib_cm_req_param ib_param; + struct srp_login_req ib_req; + } *req = NULL; + char *ipi, *tpi; + int status; + + req = kzalloc(sizeof *req, GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->ib_param.flow_control = 1; + req->ib_param.retry_count = target->tl_retry_count; + + /* + * Pick some arbitrary defaults here; we could make these + * module parameters if anyone cared about setting them. + */ + req->ib_param.responder_resources = 4; + req->ib_param.rnr_retry_count = 7; + req->ib_param.max_cm_retries = 15; + + req->ib_req.opcode = SRP_LOGIN_REQ; + req->ib_req.tag = 0; + req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len); + req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | + SRP_BUF_FORMAT_INDIRECT); + req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI : + SRP_MULTICHAN_SINGLE); + + if (target->using_rdma_cm) { + req->rdma_param.flow_control = req->ib_param.flow_control; + req->rdma_param.responder_resources = + req->ib_param.responder_resources; + req->rdma_param.initiator_depth = req->ib_param.initiator_depth; + req->rdma_param.retry_count = req->ib_param.retry_count; + req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count; + req->rdma_param.private_data = &req->rdma_req; + req->rdma_param.private_data_len = sizeof(req->rdma_req); + + req->rdma_req.opcode = req->ib_req.opcode; + req->rdma_req.tag = req->ib_req.tag; + req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len; + req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt; + req->rdma_req.req_flags = req->ib_req.req_flags; + + ipi = req->rdma_req.initiator_port_id; + tpi = req->rdma_req.target_port_id; + } else { + u8 subnet_timeout; + + subnet_timeout = srp_get_subnet_timeout(target->srp_host); + + req->ib_param.primary_path = &ch->ib_cm.path; + req->ib_param.alternate_path = NULL; + req->ib_param.service_id = target->ib_cm.service_id; + get_random_bytes(&req->ib_param.starting_psn, 4); + req->ib_param.starting_psn &= 0xffffff; + req->ib_param.qp_num = ch->qp->qp_num; + req->ib_param.qp_type = ch->qp->qp_type; + req->ib_param.local_cm_response_timeout = subnet_timeout + 2; + req->ib_param.remote_cm_response_timeout = subnet_timeout + 2; + req->ib_param.private_data = &req->ib_req; + req->ib_param.private_data_len = sizeof(req->ib_req); + + ipi = req->ib_req.initiator_port_id; + tpi = req->ib_req.target_port_id; + } + + /* + * In the published SRP specification (draft rev. 16a), the + * port identifier format is 8 bytes of ID extension followed + * by 8 bytes of GUID. Older drafts put the two halves in the + * opposite order, so that the GUID comes first. 
+ * + * Targets conforming to these obsolete drafts can be + * recognized by the I/O Class they report. + */ + if (target->io_class == SRP_REV10_IB_IO_CLASS) { + memcpy(ipi, &target->sgid.global.interface_id, 8); + memcpy(ipi + 8, &target->initiator_ext, 8); + memcpy(tpi, &target->ioc_guid, 8); + memcpy(tpi + 8, &target->id_ext, 8); + } else { + memcpy(ipi, &target->initiator_ext, 8); + memcpy(ipi + 8, &target->sgid.global.interface_id, 8); + memcpy(tpi, &target->id_ext, 8); + memcpy(tpi + 8, &target->ioc_guid, 8); + } + + /* + * Topspin/Cisco SRP targets will reject our login unless we + * zero out the first 8 bytes of our initiator port ID and set + * the second 8 bytes to the local node GUID. + */ + if (srp_target_is_topspin(target)) { + shost_printk(KERN_DEBUG, target->scsi_host, + PFX "Topspin/Cisco initiator port ID workaround " + "activated for target GUID %016llx\n", + be64_to_cpu(target->ioc_guid)); + memset(ipi, 0, 8); + memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); + } + + if (target->using_rdma_cm) + status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); + else + status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); + + kfree(req); + + return status; +} + +static bool srp_queue_remove_work(struct srp_target_port *target) +{ + bool changed = false; + + spin_lock_irq(&target->lock); + if (target->state != SRP_TARGET_REMOVED) { + target->state = SRP_TARGET_REMOVED; + changed = true; + } + spin_unlock_irq(&target->lock); + + if (changed) + queue_work(srp_remove_wq, &target->remove_work); + + return changed; +} + +static void srp_disconnect_target(struct srp_target_port *target) +{ + struct srp_rdma_ch *ch; + int i, ret; + + /* XXX should send SRP_I_LOGOUT request */ + + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + ch->connected = false; + ret = 0; + if (target->using_rdma_cm) { + if (ch->rdma_cm.cm_id) + rdma_disconnect(ch->rdma_cm.cm_id); + } else { + if (ch->ib_cm.cm_id) + ret = ib_send_cm_dreq(ch->ib_cm.cm_id, + NULL, 0); + } + if (ret < 0) { + shost_printk(KERN_DEBUG, target->scsi_host, + PFX "Sending CM DREQ failed\n"); + } + } +} + +static void srp_free_req_data(struct srp_target_port *target, + struct srp_rdma_ch *ch) +{ + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_device *ibdev = dev->dev; + struct srp_request *req; + int i; + + if (!ch->req_ring) + return; + + for (i = 0; i < target->req_ring_size; ++i) { + req = &ch->req_ring[i]; + if (dev->use_fast_reg) { + kfree(req->fr_list); + } else { + kfree(req->fmr_list); + kfree(req->map_page); + } + if (req->indirect_dma_addr) { + ib_dma_unmap_single(ibdev, req->indirect_dma_addr, + target->indirect_size, + DMA_TO_DEVICE); + } + kfree(req->indirect_desc); + } + + kfree(ch->req_ring); + ch->req_ring = NULL; +} + +static int srp_alloc_req_data(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct srp_device *srp_dev = target->srp_host->srp_dev; + struct ib_device *ibdev = srp_dev->dev; + struct srp_request *req; + void *mr_list; + dma_addr_t dma_addr; + int i, ret = -ENOMEM; + + ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), + GFP_KERNEL); + if (!ch->req_ring) + goto out; + + for (i = 0; i < target->req_ring_size; ++i) { + req = &ch->req_ring[i]; + mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), + GFP_KERNEL); + if (!mr_list) + goto out; + if (srp_dev->use_fast_reg) { + req->fr_list = mr_list; + } else { + req->fmr_list = mr_list; + req->map_page = kmalloc_array(srp_dev->max_pages_per_mr, + 
sizeof(void *), + GFP_KERNEL); + if (!req->map_page) + goto out; + } + req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); + if (!req->indirect_desc) + goto out; + + dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, + target->indirect_size, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(ibdev, dma_addr)) + goto out; + + req->indirect_dma_addr = dma_addr; + } + ret = 0; + +out: + return ret; +} + +/** + * srp_del_scsi_host_attr() - Remove attributes defined in the host template. + * @shost: SCSI host whose attributes to remove from sysfs. + * + * Note: Any attributes defined in the host template and that did not exist + * before invocation of this function will be ignored. + */ +static void srp_del_scsi_host_attr(struct Scsi_Host *shost) +{ + struct device_attribute **attr; + + for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr) + device_remove_file(&shost->shost_dev, *attr); +} + +static void srp_remove_target(struct srp_target_port *target) +{ + struct srp_rdma_ch *ch; + int i; + + WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); + + srp_del_scsi_host_attr(target->scsi_host); + srp_rport_get(target->rport); + srp_remove_host(target->scsi_host); + scsi_remove_host(target->scsi_host); + srp_stop_rport_timers(target->rport); + srp_disconnect_target(target); + kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + srp_free_ch_ib(target, ch); + } + cancel_work_sync(&target->tl_err_work); + srp_rport_put(target->rport); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + srp_free_req_data(target, ch); + } + kfree(target->ch); + target->ch = NULL; + + spin_lock(&target->srp_host->target_lock); + list_del(&target->list); + spin_unlock(&target->srp_host->target_lock); + + scsi_host_put(target->scsi_host); +} + +static void srp_remove_work(struct work_struct *work) +{ + struct srp_target_port *target = + container_of(work, struct srp_target_port, remove_work); + + WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); + + srp_remove_target(target); +} + +static void srp_rport_delete(struct srp_rport *rport) +{ + struct srp_target_port *target = rport->lld_data; + + srp_queue_remove_work(target); +} + +/** + * srp_connected_ch() - number of connected channels + * @target: SRP target port. + */ +static int srp_connected_ch(struct srp_target_port *target) +{ + int i, c = 0; + + for (i = 0; i < target->ch_count; i++) + c += target->ch[i].connected; + + return c; +} + +static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) +{ + struct srp_target_port *target = ch->target; + int ret; + + WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); + + ret = srp_lookup_path(ch); + if (ret) + goto out; + + while (1) { + init_completion(&ch->done); + ret = srp_send_req(ch, multich); + if (ret) + goto out; + ret = wait_for_completion_interruptible(&ch->done); + if (ret < 0) + goto out; + + /* + * The CM event handling code will set status to + * SRP_PORT_REDIRECT if we get a port redirect REJ + * back, or SRP_DLID_REDIRECT if we get a lid/qp + * redirect REJ back. + */ + ret = ch->status; + switch (ret) { + case 0: + ch->connected = true; + goto out; + + case SRP_PORT_REDIRECT: + ret = srp_lookup_path(ch); + if (ret) + goto out; + break; + + case SRP_DLID_REDIRECT: + break; + + case SRP_STALE_CONN: + shost_printk(KERN_ERR, target->scsi_host, PFX + "giving up on stale connection\n"); + ret = -ECONNRESET; + goto out; + + default: + goto out; + } + } + +out: + return ret <= 0 ? 
ret : -ENODEV; +} + +static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc) +{ + srp_handle_qp_err(cq, wc, "INV RKEY"); +} + +static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch, + u32 rkey) +{ + struct ib_send_wr wr = { + .opcode = IB_WR_LOCAL_INV, + .next = NULL, + .num_sge = 0, + .send_flags = 0, + .ex.invalidate_rkey = rkey, + }; + + wr.wr_cqe = &req->reg_cqe; + req->reg_cqe.done = srp_inv_rkey_err_done; + return ib_post_send(ch->qp, &wr, NULL); +} + +static void srp_unmap_data(struct scsi_cmnd *scmnd, + struct srp_rdma_ch *ch, + struct srp_request *req) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_device *ibdev = dev->dev; + int i, res; + + if (!scsi_sglist(scmnd) || + (scmnd->sc_data_direction != DMA_TO_DEVICE && + scmnd->sc_data_direction != DMA_FROM_DEVICE)) + return; + + if (dev->use_fast_reg) { + struct srp_fr_desc **pfr; + + for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { + res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); + if (res < 0) { + shost_printk(KERN_ERR, target->scsi_host, PFX + "Queueing INV WR for rkey %#x failed (%d)\n", + (*pfr)->mr->rkey, res); + queue_work(system_long_wq, + &target->tl_err_work); + } + } + if (req->nmdesc) + srp_fr_pool_put(ch->fr_pool, req->fr_list, + req->nmdesc); + } else if (dev->use_fmr) { + struct ib_pool_fmr **pfmr; + + for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++) + ib_fmr_pool_unmap(*pfmr); + } + + ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), + scmnd->sc_data_direction); +} + +/** + * srp_claim_req - Take ownership of the scmnd associated with a request. + * @ch: SRP RDMA channel. + * @req: SRP request. + * @sdev: If not NULL, only take ownership for this SCSI device. + * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take + * ownership of @req->scmnd if it equals @scmnd. + * + * Return value: + * Either NULL or a pointer to the SCSI command the caller became owner of. + */ +static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, + struct srp_request *req, + struct scsi_device *sdev, + struct scsi_cmnd *scmnd) +{ + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + if (req->scmnd && + (!sdev || req->scmnd->device == sdev) && + (!scmnd || req->scmnd == scmnd)) { + scmnd = req->scmnd; + req->scmnd = NULL; + } else { + scmnd = NULL; + } + spin_unlock_irqrestore(&ch->lock, flags); + + return scmnd; +} + +/** + * srp_free_req() - Unmap data and adjust ch->req_lim. + * @ch: SRP RDMA channel. + * @req: Request to be freed. + * @scmnd: SCSI command associated with @req. + * @req_lim_delta: Amount to be added to @target->req_lim. 
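/*
 * Illustrative aside (not part of the patch): srp_claim_req() above hands
 * ownership of req->scmnd to exactly one caller by testing and clearing the
 * pointer under ch->lock, so the normal completion path, srp_abort() and
 * srp_finish_req() can never complete the same SCSI command twice.  A
 * minimal userspace model of that claim pattern, using a pthread mutex in
 * place of the channel spinlock (names are mine):
 */
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct fake_req {
	pthread_mutex_t lock;
	void *scmnd;		/* stands in for the SCSI command pointer */
};

/* Return the command if we took ownership, NULL if someone else already did. */
static void *claim_req(struct fake_req *req)
{
	void *owned = NULL;

	pthread_mutex_lock(&req->lock);
	if (req->scmnd) {
		owned = req->scmnd;
		req->scmnd = NULL;
	}
	pthread_mutex_unlock(&req->lock);
	return owned;
}

int main(void)
{
	int cmd = 42;
	struct fake_req req = { PTHREAD_MUTEX_INITIALIZER, &cmd };

	printf("first claim:  %p\n", claim_req(&req));	/* gets the command */
	printf("second claim: %p\n", claim_req(&req));	/* gets NULL */
	return 0;
}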
+ */ +static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, + struct scsi_cmnd *scmnd, s32 req_lim_delta) +{ + unsigned long flags; + + srp_unmap_data(scmnd, ch, req); + + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += req_lim_delta; + spin_unlock_irqrestore(&ch->lock, flags); +} + +static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, + struct scsi_device *sdev, int result) +{ + struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); + + if (scmnd) { + srp_free_req(ch, req, scmnd, 0); + scmnd->result = result; + scmnd->scsi_done(scmnd); + } +} + +static void srp_terminate_io(struct srp_rport *rport) +{ + struct srp_target_port *target = rport->lld_data; + struct srp_rdma_ch *ch; + struct Scsi_Host *shost = target->scsi_host; + struct scsi_device *sdev; + int i, j; + + /* + * Invoking srp_terminate_io() while srp_queuecommand() is running + * is not safe. Hence the warning statement below. + */ + shost_for_each_device(sdev, shost) + WARN_ON_ONCE(sdev->request_queue->request_fn_active); + + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + + for (j = 0; j < target->req_ring_size; ++j) { + struct srp_request *req = &ch->req_ring[j]; + + srp_finish_req(ch, req, NULL, + DID_TRANSPORT_FAILFAST << 16); + } + } +} + +/* + * It is up to the caller to ensure that srp_rport_reconnect() calls are + * serialized and that no concurrent srp_queuecommand(), srp_abort(), + * srp_reset_device() or srp_reset_host() calls will occur while this function + * is in progress. One way to realize that is not to call this function + * directly but to call srp_reconnect_rport() instead since that last function + * serializes calls of this function via rport->mutex and also blocks + * srp_queuecommand() calls before invoking this function. + */ +static int srp_rport_reconnect(struct srp_rport *rport) +{ + struct srp_target_port *target = rport->lld_data; + struct srp_rdma_ch *ch; + int i, j, ret = 0; + bool multich = false; + + srp_disconnect_target(target); + + if (target->state == SRP_TARGET_SCANNING) + return -ENODEV; + + /* + * Now get a new local CM ID so that we avoid confusing the target in + * case things are really fouled up. Doing so also ensures that all CM + * callbacks will have finished before a new QP is allocated. + */ + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + ret += srp_new_cm_id(ch); + } + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + for (j = 0; j < target->req_ring_size; ++j) { + struct srp_request *req = &ch->req_ring[j]; + + srp_finish_req(ch, req, NULL, DID_RESET << 16); + } + } + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + /* + * Whether or not creating a new CM ID succeeded, create a new + * QP. This guarantees that all completion callback function + * invocations have finished before request resetting starts. 
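/*
 * Illustrative aside (not part of the patch): srp_terminate_io() and
 * srp_rport_reconnect() above fail outstanding requests by storing a host
 * byte such as DID_TRANSPORT_FAILFAST or DID_RESET shifted left by 16 into
 * scmnd->result.  The sketch below only models that "host byte lives in
 * bits 23:16" packing; the HOST_* values are placeholders of mine, not the
 * kernel's DID_* encodings.
 */
#include <stdio.h>

enum {				/* placeholder host codes, illustration only */
	HOST_OK			= 0x00,
	HOST_RESET		= 0x01,
	HOST_TRANSPORT_FAILFAST	= 0x02,
};

static unsigned int make_result(unsigned int host_byte, unsigned int status_byte)
{
	return (host_byte << 16) | status_byte;	/* same shift the driver uses */
}

int main(void)
{
	printf("failfast result: %#x\n", make_result(HOST_TRANSPORT_FAILFAST, 0));
	printf("reset result:    %#x\n", make_result(HOST_RESET, 0));
	return 0;
}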
+ */ + ret += srp_create_ch_ib(ch); + + INIT_LIST_HEAD(&ch->free_tx); + for (j = 0; j < target->queue_size; ++j) + list_add(&ch->tx_ring[j]->list, &ch->free_tx); + } + + target->qp_in_error = false; + + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + if (ret) + break; + ret = srp_connect_ch(ch, multich); + multich = true; + } + + if (ret == 0) + shost_printk(KERN_INFO, target->scsi_host, + PFX "reconnect succeeded\n"); + + return ret; +} + +static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, + unsigned int dma_len, u32 rkey) +{ + struct srp_direct_buf *desc = state->desc; + + WARN_ON_ONCE(!dma_len); + + desc->va = cpu_to_be64(dma_addr); + desc->key = cpu_to_be32(rkey); + desc->len = cpu_to_be32(dma_len); + + state->total_len += dma_len; + state->desc++; + state->ndesc++; +} + +static int srp_map_finish_fmr(struct srp_map_state *state, + struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_pool_fmr *fmr; + u64 io_addr = 0; + + if (state->fmr.next >= state->fmr.end) { + shost_printk(KERN_ERR, ch->target->scsi_host, + PFX "Out of MRs (mr_per_cmd = %d)\n", + ch->target->mr_per_cmd); + return -ENOMEM; + } + + WARN_ON_ONCE(!dev->use_fmr); + + if (state->npages == 0) + return 0; + + if (state->npages == 1 && target->global_rkey) { + srp_map_desc(state, state->base_dma_addr, state->dma_len, + target->global_rkey); + goto reset_state; + } + + fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, + state->npages, io_addr); + if (IS_ERR(fmr)) + return PTR_ERR(fmr); + + *state->fmr.next++ = fmr; + state->nmdesc++; + + srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask, + state->dma_len, fmr->fmr->rkey); + +reset_state: + state->npages = 0; + state->dma_len = 0; + + return 0; +} + +static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc) +{ + srp_handle_qp_err(cq, wc, "FAST REG"); +} + +/* + * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset + * where to start in the first element. If sg_offset_p != NULL then + * *sg_offset_p is updated to the offset in state->sg[retval] of the first + * byte that has not yet been mapped. + */ +static int srp_map_finish_fr(struct srp_map_state *state, + struct srp_request *req, + struct srp_rdma_ch *ch, int sg_nents, + unsigned int *sg_offset_p) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_reg_wr wr; + struct srp_fr_desc *desc; + u32 rkey; + int n, err; + + if (state->fr.next >= state->fr.end) { + shost_printk(KERN_ERR, ch->target->scsi_host, + PFX "Out of MRs (mr_per_cmd = %d)\n", + ch->target->mr_per_cmd); + return -ENOMEM; + } + + WARN_ON_ONCE(!dev->use_fast_reg); + + if (sg_nents == 1 && target->global_rkey) { + unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; + + srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, + sg_dma_len(state->sg) - sg_offset, + target->global_rkey); + if (sg_offset_p) + *sg_offset_p = 0; + return 1; + } + + desc = srp_fr_pool_get(ch->fr_pool); + if (!desc) + return -ENOMEM; + + rkey = ib_inc_rkey(desc->mr->rkey); + ib_update_fast_reg_key(desc->mr, rkey); + + n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, + dev->mr_page_size); + if (unlikely(n < 0)) { + srp_fr_pool_put(ch->fr_pool, &desc, 1); + pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n", + dev_name(&req->scmnd->device->sdev_gendev), sg_nents, + sg_offset_p ? 
*sg_offset_p : -1, n); + return n; + } + + WARN_ON_ONCE(desc->mr->length == 0); + + req->reg_cqe.done = srp_reg_mr_err_done; + + wr.wr.next = NULL; + wr.wr.opcode = IB_WR_REG_MR; + wr.wr.wr_cqe = &req->reg_cqe; + wr.wr.num_sge = 0; + wr.wr.send_flags = 0; + wr.mr = desc->mr; + wr.key = desc->mr->rkey; + wr.access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE); + + *state->fr.next++ = desc; + state->nmdesc++; + + srp_map_desc(state, desc->mr->iova, + desc->mr->length, desc->mr->rkey); + + err = ib_post_send(ch->qp, &wr.wr, NULL); + if (unlikely(err)) { + WARN_ON_ONCE(err == -ENOMEM); + return err; + } + + return n; +} + +static int srp_map_sg_entry(struct srp_map_state *state, + struct srp_rdma_ch *ch, + struct scatterlist *sg) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct ib_device *ibdev = dev->dev; + dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); + unsigned int dma_len = ib_sg_dma_len(ibdev, sg); + unsigned int len = 0; + int ret; + + WARN_ON_ONCE(!dma_len); + + while (dma_len) { + unsigned offset = dma_addr & ~dev->mr_page_mask; + + if (state->npages == dev->max_pages_per_mr || + (state->npages > 0 && offset != 0)) { + ret = srp_map_finish_fmr(state, ch); + if (ret) + return ret; + } + + len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); + + if (!state->npages) + state->base_dma_addr = dma_addr; + state->pages[state->npages++] = dma_addr & dev->mr_page_mask; + state->dma_len += len; + dma_addr += len; + dma_len -= len; + } + + /* + * If the end of the MR is not on a page boundary then we need to + * close it out and start a new one -- we can only merge at page + * boundaries. + */ + ret = 0; + if ((dma_addr & ~dev->mr_page_mask) != 0) + ret = srp_map_finish_fmr(state, ch); + return ret; +} + +static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) +{ + struct scatterlist *sg; + int i, ret; + + state->pages = req->map_page; + state->fmr.next = req->fmr_list; + state->fmr.end = req->fmr_list + ch->target->mr_per_cmd; + + for_each_sg(scat, sg, count, i) { + ret = srp_map_sg_entry(state, ch, sg); + if (ret) + return ret; + } + + ret = srp_map_finish_fmr(state, ch); + if (ret) + return ret; + + return 0; +} + +static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) +{ + unsigned int sg_offset = 0; + + state->fr.next = req->fr_list; + state->fr.end = req->fr_list + ch->target->mr_per_cmd; + state->sg = scat; + + if (count == 0) + return 0; + + while (count) { + int i, n; + + n = srp_map_finish_fr(state, req, ch, count, &sg_offset); + if (unlikely(n < 0)) + return n; + + count -= n; + for (i = 0; i < n; i++) + state->sg = sg_next(state->sg); + } + + return 0; +} + +static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct scatterlist *sg; + int i; + + for_each_sg(scat, sg, count, i) { + srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), + ib_sg_dma_len(dev->dev, sg), + target->global_rkey); + } + + return 0; +} + +/* + * Register the indirect data buffer descriptor with the HCA. 
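/*
 * Illustrative aside (not part of the patch): srp_map_sg_entry() above
 * splits each S/G element into mr_page_size chunks and closes the current
 * memory region whenever max_pages_per_mr pages have been collected or a
 * chunk starts at a non-zero page offset, because regions can only be
 * merged at page boundaries.  A standalone model of that splitting rule
 * (names and the example geometry are mine):
 */
#include <stdio.h>
#include <stdint.h>

#define MR_PAGE_SIZE	 4096u
#define MAX_PAGES_PER_MR 4u

/* Count how many MRs a single [dma_addr, dma_addr + dma_len) range needs. */
static unsigned int count_mrs(uint64_t dma_addr, uint64_t dma_len)
{
	unsigned int npages = 0, nmr = 0;

	while (dma_len) {
		uint64_t offset = dma_addr & (MR_PAGE_SIZE - 1);
		uint64_t chunk = dma_len < MR_PAGE_SIZE - offset ?
				 dma_len : MR_PAGE_SIZE - offset;

		/* Close the MR when it is full or the chunk is misaligned. */
		if (npages == MAX_PAGES_PER_MR || (npages > 0 && offset != 0)) {
			nmr++;
			npages = 0;
		}
		npages++;
		dma_addr += chunk;
		dma_len -= chunk;
	}
	return npages ? nmr + 1 : nmr;
}

int main(void)
{
	printf("aligned 32 KiB needs %u MR(s)\n", count_mrs(0x10000, 32768));
	printf("misaligned 8 KiB needs %u MR(s)\n", count_mrs(0x10100, 8192));
	return 0;
}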
+ * + * Note: since the indirect data buffer descriptor has been allocated with + * kmalloc() it is guaranteed that this buffer is a physically contiguous + * memory buffer. + */ +static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, + void **next_mr, void **end_mr, u32 idb_len, + __be32 *idb_rkey) +{ + struct srp_target_port *target = ch->target; + struct srp_device *dev = target->srp_host->srp_dev; + struct srp_map_state state; + struct srp_direct_buf idb_desc; + u64 idb_pages[1]; + struct scatterlist idb_sg[1]; + int ret; + + memset(&state, 0, sizeof(state)); + memset(&idb_desc, 0, sizeof(idb_desc)); + state.gen.next = next_mr; + state.gen.end = end_mr; + state.desc = &idb_desc; + state.base_dma_addr = req->indirect_dma_addr; + state.dma_len = idb_len; + + if (dev->use_fast_reg) { + state.sg = idb_sg; + sg_init_one(idb_sg, req->indirect_desc, idb_len); + idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ +#ifdef CONFIG_NEED_SG_DMA_LENGTH + idb_sg->dma_length = idb_sg->length; /* hack^2 */ +#endif + ret = srp_map_finish_fr(&state, req, ch, 1, NULL); + if (ret < 0) + return ret; + WARN_ON_ONCE(ret < 1); + } else if (dev->use_fmr) { + state.pages = idb_pages; + state.pages[0] = (req->indirect_dma_addr & + dev->mr_page_mask); + state.npages = 1; + ret = srp_map_finish_fmr(&state, ch); + if (ret < 0) + return ret; + } else { + return -EINVAL; + } + + *idb_rkey = idb_desc.key; + + return 0; +} + +static void srp_check_mapping(struct srp_map_state *state, + struct srp_rdma_ch *ch, struct srp_request *req, + struct scatterlist *scat, int count) +{ + struct srp_device *dev = ch->target->srp_host->srp_dev; + struct srp_fr_desc **pfr; + u64 desc_len = 0, mr_len = 0; + int i; + + for (i = 0; i < state->ndesc; i++) + desc_len += be32_to_cpu(req->indirect_desc[i].len); + if (dev->use_fast_reg) + for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) + mr_len += (*pfr)->mr->length; + else if (dev->use_fmr) + for (i = 0; i < state->nmdesc; i++) + mr_len += be32_to_cpu(req->indirect_desc[i].len); + if (desc_len != scsi_bufflen(req->scmnd) || + mr_len > scsi_bufflen(req->scmnd)) + pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n", + scsi_bufflen(req->scmnd), desc_len, mr_len, + state->ndesc, state->nmdesc); +} + +/** + * srp_map_data() - map SCSI data buffer onto an SRP request + * @scmnd: SCSI command to map + * @ch: SRP RDMA channel + * @req: SRP request + * + * Returns the length in bytes of the SRP_CMD IU or a negative value if + * mapping failed. 
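/*
 * Illustrative aside (not part of the patch): srp_check_mapping() above
 * cross-checks the generated descriptors against the SCSI buffer: the sum
 * of the descriptor lengths must equal scsi_bufflen() and the registered
 * MR length must not exceed it.  A trimmed-down userspace version of that
 * sanity check (structure and names are mine):
 */
#include <stdio.h>
#include <stdint.h>

struct desc { uint32_t len; };

static int mapping_consistent(const struct desc *d, int ndesc,
			      uint64_t mr_len, uint32_t scsi_len)
{
	uint64_t desc_len = 0;
	int i;

	for (i = 0; i < ndesc; i++)
		desc_len += d[i].len;
	return desc_len == scsi_len && mr_len <= scsi_len;
}

int main(void)
{
	struct desc d[] = { { 4096 }, { 4096 }, { 512 } };

	printf("consistent: %d\n", mapping_consistent(d, 3, 8704, 8704));
	printf("short desc: %d\n", mapping_consistent(d, 2, 8704, 8704));
	return 0;
}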
+ */ +static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, + struct srp_request *req) +{ + struct srp_target_port *target = ch->target; + struct scatterlist *scat; + struct srp_cmd *cmd = req->cmd->buf; + int len, nents, count, ret; + struct srp_device *dev; + struct ib_device *ibdev; + struct srp_map_state state; + struct srp_indirect_buf *indirect_hdr; + u32 idb_len, table_len; + __be32 idb_rkey; + u8 fmt; + + if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) + return sizeof (struct srp_cmd); + + if (scmnd->sc_data_direction != DMA_FROM_DEVICE && + scmnd->sc_data_direction != DMA_TO_DEVICE) { + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Unhandled data direction %d\n", + scmnd->sc_data_direction); + return -EINVAL; + } + + nents = scsi_sg_count(scmnd); + scat = scsi_sglist(scmnd); + + dev = target->srp_host->srp_dev; + ibdev = dev->dev; + + count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); + if (unlikely(count == 0)) + return -EIO; + + fmt = SRP_DATA_DESC_DIRECT; + len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); + + if (count == 1 && target->global_rkey) { + /* + * The midlayer only generated a single gather/scatter + * entry, or DMA mapping coalesced everything to a + * single entry. So a direct descriptor along with + * the DMA MR suffices. + */ + struct srp_direct_buf *buf = (void *) cmd->add_data; + + buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); + buf->key = cpu_to_be32(target->global_rkey); + buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); + + req->nmdesc = 0; + goto map_complete; + } + + /* + * We have more than one scatter/gather entry, so build our indirect + * descriptor table, trying to merge as many entries as we can. + */ + indirect_hdr = (void *) cmd->add_data; + + ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, + target->indirect_size, DMA_TO_DEVICE); + + memset(&state, 0, sizeof(state)); + state.desc = req->indirect_desc; + if (dev->use_fast_reg) + ret = srp_map_sg_fr(&state, ch, req, scat, count); + else if (dev->use_fmr) + ret = srp_map_sg_fmr(&state, ch, req, scat, count); + else + ret = srp_map_sg_dma(&state, ch, req, scat, count); + req->nmdesc = state.nmdesc; + if (ret < 0) + goto unmap; + + { + DEFINE_DYNAMIC_DEBUG_METADATA(ddm, + "Memory mapping consistency check"); + if (DYNAMIC_DEBUG_BRANCH(ddm)) + srp_check_mapping(&state, ch, req, scat, count); + } + + /* We've mapped the request, now pull as much of the indirect + * descriptor table as we can into the command buffer. If this + * target is not using an external indirect table, we are + * guaranteed to fit into the command, as the SCSI layer won't + * give us more S/G entries than we allow. + */ + if (state.ndesc == 1) { + /* + * Memory registration collapsed the sg-list into one entry, + * so use a direct descriptor. 
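/*
 * Illustrative aside (not part of the patch): when the mapped S/G list
 * collapses to a single entry and a global rkey is available, srp_map_data()
 * above emits one direct descriptor: a big-endian {virtual address, rkey,
 * length} triple, which is exactly what srp_map_desc() writes.  A standalone
 * sketch of that layout, using glibc's htobe64()/htobe32() in place of the
 * kernel's cpu_to_be*() helpers:
 */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct direct_buf {		/* mirrors the three srp_direct_buf fields */
	uint64_t va;
	uint32_t key;
	uint32_t len;
} __attribute__((packed));

static void fill_direct_buf(struct direct_buf *buf, uint64_t dma_addr,
			    uint32_t rkey, uint32_t dma_len)
{
	buf->va  = htobe64(dma_addr);
	buf->key = htobe32(rkey);
	buf->len = htobe32(dma_len);
}

int main(void)
{
	struct direct_buf buf;

	fill_direct_buf(&buf, 0x1000, 0x1234, 8192);	/* example values */
	printf("descriptor is %zu bytes on the wire\n", sizeof(buf));
	return 0;
}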
+ */ + struct srp_direct_buf *buf = (void *) cmd->add_data; + + *buf = req->indirect_desc[0]; + goto map_complete; + } + + if (unlikely(target->cmd_sg_cnt < state.ndesc && + !target->allow_ext_sg)) { + shost_printk(KERN_ERR, target->scsi_host, + "Could not fit S/G list into SRP_CMD\n"); + ret = -EIO; + goto unmap; + } + + count = min(state.ndesc, target->cmd_sg_cnt); + table_len = state.ndesc * sizeof (struct srp_direct_buf); + idb_len = sizeof(struct srp_indirect_buf) + table_len; + + fmt = SRP_DATA_DESC_INDIRECT; + len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); + len += count * sizeof (struct srp_direct_buf); + + memcpy(indirect_hdr->desc_list, req->indirect_desc, + count * sizeof (struct srp_direct_buf)); + + if (!target->global_rkey) { + ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, + idb_len, &idb_rkey); + if (ret < 0) + goto unmap; + req->nmdesc++; + } else { + idb_rkey = cpu_to_be32(target->global_rkey); + } + + indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); + indirect_hdr->table_desc.key = idb_rkey; + indirect_hdr->table_desc.len = cpu_to_be32(table_len); + indirect_hdr->len = cpu_to_be32(state.total_len); + + if (scmnd->sc_data_direction == DMA_TO_DEVICE) + cmd->data_out_desc_cnt = count; + else + cmd->data_in_desc_cnt = count; + + ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, + DMA_TO_DEVICE); + +map_complete: + if (scmnd->sc_data_direction == DMA_TO_DEVICE) + cmd->buf_fmt = fmt << 4; + else + cmd->buf_fmt = fmt; + + return len; + +unmap: + srp_unmap_data(scmnd, ch, req); + if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) + ret = -E2BIG; + return ret; +} + +/* + * Return an IU and possible credit to the free pool + */ +static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, + enum srp_iu_type iu_type) +{ + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + list_add(&iu->list, &ch->free_tx); + if (iu_type != SRP_IU_RSP) + ++ch->req_lim; + spin_unlock_irqrestore(&ch->lock, flags); +} + +/* + * Must be called with ch->lock held to protect req_lim and free_tx. + * If IU is not sent, it must be returned using srp_put_tx_iu(). + * + * Note: + * An upper limit for the number of allocated information units for each + * request type is: + * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues + * more than Scsi_Host.can_queue requests. + * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. + * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than + * one unanswered SRP request to an initiator. + */ +static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, + enum srp_iu_type iu_type) +{ + struct srp_target_port *target = ch->target; + s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; + struct srp_iu *iu; + + lockdep_assert_held(&ch->lock); + + ib_process_cq_direct(ch->send_cq, -1); + + if (list_empty(&ch->free_tx)) + return NULL; + + /* Initiator responses to target requests do not consume credits */ + if (iu_type != SRP_IU_RSP) { + if (ch->req_lim <= rsv) { + ++target->zero_req_lim; + return NULL; + } + + --ch->req_lim; + } + + iu = list_first_entry(&ch->free_tx, struct srp_iu, list); + list_del(&iu->list); + return iu; +} + +/* + * Note: if this function is called from inside ib_drain_sq() then it will + * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE + * with status IB_WC_SUCCESS then that's a bug. 
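/*
 * Illustrative aside (not part of the patch): __srp_get_tx_iu() above
 * implements the SRP credit scheme: every outgoing request consumes one
 * req_lim credit, SRP_TSK_MGMT_SQ_SIZE credits stay reserved so task
 * management never starves, and initiator responses (SRP_IU_RSP) are sent
 * without consuming a credit.  A userspace model of that bookkeeping
 * (constants and names are mine):
 */
#include <stdio.h>
#include <stdbool.h>

#define TSK_MGMT_RESERVE 1	/* stands in for SRP_TSK_MGMT_SQ_SIZE */

enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

/* Return true if an IU of this type may be sent, updating *req_lim. */
static bool take_credit(int *req_lim, enum iu_type type)
{
	int rsv = (type == IU_TSK_MGMT) ? 0 : TSK_MGMT_RESERVE;

	if (type == IU_RSP)		/* responses never consume credits */
		return true;
	if (*req_lim <= rsv)		/* keep the reserve for task mgmt */
		return false;
	--*req_lim;
	return true;
}

int main(void)
{
	int req_lim = 2;

	printf("cmd: %d (req_lim=%d)\n", take_credit(&req_lim, IU_CMD), req_lim);
	printf("cmd: %d (req_lim=%d)\n", take_credit(&req_lim, IU_CMD), req_lim);
	printf("tmf: %d (req_lim=%d)\n", take_credit(&req_lim, IU_TSK_MGMT), req_lim);
	printf("rsp: %d (req_lim=%d)\n", take_credit(&req_lim, IU_RSP), req_lim);
	return 0;
}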
+ */ +static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); + struct srp_rdma_ch *ch = cq->cq_context; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + srp_handle_qp_err(cq, wc, "SEND"); + return; + } + + lockdep_assert_held(&ch->lock); + + list_add(&iu->list, &ch->free_tx); +} + +static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) +{ + struct srp_target_port *target = ch->target; + struct ib_sge list; + struct ib_send_wr wr; + + list.addr = iu->dma; + list.length = len; + list.lkey = target->lkey; + + iu->cqe.done = srp_send_done; + + wr.next = NULL; + wr.wr_cqe = &iu->cqe; + wr.sg_list = &list; + wr.num_sge = 1; + wr.opcode = IB_WR_SEND; + wr.send_flags = IB_SEND_SIGNALED; + + return ib_post_send(ch->qp, &wr, NULL); +} + +static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) +{ + struct srp_target_port *target = ch->target; + struct ib_recv_wr wr; + struct ib_sge list; + + list.addr = iu->dma; + list.length = iu->size; + list.lkey = target->lkey; + + iu->cqe.done = srp_recv_done; + + wr.next = NULL; + wr.wr_cqe = &iu->cqe; + wr.sg_list = &list; + wr.num_sge = 1; + + return ib_post_recv(ch->qp, &wr, NULL); +} + +static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) +{ + struct srp_target_port *target = ch->target; + struct srp_request *req; + struct scsi_cmnd *scmnd; + unsigned long flags; + + if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += be32_to_cpu(rsp->req_lim_delta); + if (rsp->tag == ch->tsk_mgmt_tag) { + ch->tsk_mgmt_status = -1; + if (be32_to_cpu(rsp->resp_data_len) >= 4) + ch->tsk_mgmt_status = rsp->data[3]; + complete(&ch->tsk_mgmt_done); + } else { + shost_printk(KERN_ERR, target->scsi_host, + "Received tsk mgmt response too late for tag %#llx\n", + rsp->tag); + } + spin_unlock_irqrestore(&ch->lock, flags); + } else { + scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); + if (scmnd && scmnd->host_scribble) { + req = (void *)scmnd->host_scribble; + scmnd = srp_claim_req(ch, req, NULL, scmnd); + } else { + scmnd = NULL; + } + if (!scmnd) { + shost_printk(KERN_ERR, target->scsi_host, + "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", + rsp->tag, ch - target->ch, ch->qp->qp_num); + + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&ch->lock, flags); + + return; + } + scmnd->result = rsp->status; + + if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { + memcpy(scmnd->sense_buffer, rsp->data + + be32_to_cpu(rsp->resp_data_len), + min_t(int, be32_to_cpu(rsp->sense_data_len), + SCSI_SENSE_BUFFERSIZE)); + } + + if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) + scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); + else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) + scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); + else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) + scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); + else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) + scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); + + srp_free_req(ch, req, scmnd, + be32_to_cpu(rsp->req_lim_delta)); + + scmnd->host_scribble = NULL; + scmnd->scsi_done(scmnd); + } +} + +static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, + void *rsp, int len) +{ + struct srp_target_port *target = ch->target; + struct ib_device *dev = target->srp_host->srp_dev->dev; + unsigned long flags; + 
struct srp_iu *iu; + int err; + + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += req_delta; + iu = __srp_get_tx_iu(ch, SRP_IU_RSP); + spin_unlock_irqrestore(&ch->lock, flags); + + if (!iu) { + shost_printk(KERN_ERR, target->scsi_host, PFX + "no IU available to send response\n"); + return 1; + } + + ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); + memcpy(iu->buf, rsp, len); + ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); + + err = srp_post_send(ch, iu, len); + if (err) { + shost_printk(KERN_ERR, target->scsi_host, PFX + "unable to post response: %d\n", err); + srp_put_tx_iu(ch, iu, SRP_IU_RSP); + } + + return err; +} + +static void srp_process_cred_req(struct srp_rdma_ch *ch, + struct srp_cred_req *req) +{ + struct srp_cred_rsp rsp = { + .opcode = SRP_CRED_RSP, + .tag = req->tag, + }; + s32 delta = be32_to_cpu(req->req_lim_delta); + + if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) + shost_printk(KERN_ERR, ch->target->scsi_host, PFX + "problems processing SRP_CRED_REQ\n"); +} + +static void srp_process_aer_req(struct srp_rdma_ch *ch, + struct srp_aer_req *req) +{ + struct srp_target_port *target = ch->target; + struct srp_aer_rsp rsp = { + .opcode = SRP_AER_RSP, + .tag = req->tag, + }; + s32 delta = be32_to_cpu(req->req_lim_delta); + + shost_printk(KERN_ERR, target->scsi_host, PFX + "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); + + if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) + shost_printk(KERN_ERR, target->scsi_host, PFX + "problems processing SRP_AER_REQ\n"); +} + +static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); + struct srp_rdma_ch *ch = cq->cq_context; + struct srp_target_port *target = ch->target; + struct ib_device *dev = target->srp_host->srp_dev->dev; + int res; + u8 opcode; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + srp_handle_qp_err(cq, wc, "RECV"); + return; + } + + ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, + DMA_FROM_DEVICE); + + opcode = *(u8 *) iu->buf; + + if (0) { + shost_printk(KERN_ERR, target->scsi_host, + PFX "recv completion, opcode 0x%02x\n", opcode); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, + iu->buf, wc->byte_len, true); + } + + switch (opcode) { + case SRP_RSP: + srp_process_rsp(ch, iu->buf); + break; + + case SRP_CRED_REQ: + srp_process_cred_req(ch, iu->buf); + break; + + case SRP_AER_REQ: + srp_process_aer_req(ch, iu->buf); + break; + + case SRP_T_LOGOUT: + /* XXX Handle target logout */ + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Got target logout request\n"); + break; + + default: + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Unhandled SRP opcode 0x%02x\n", opcode); + break; + } + + ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, + DMA_FROM_DEVICE); + + res = srp_post_recv(ch, iu); + if (res != 0) + shost_printk(KERN_ERR, target->scsi_host, + PFX "Recv failed with error code %d\n", res); +} + +/** + * srp_tl_err_work() - handle a transport layer error + * @work: Work structure embedded in an SRP target port. + * + * Note: This function may get invoked before the rport has been created, + * hence the target->rport test. 
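/*
 * Illustrative aside (not part of the patch): srp_process_cred_req() and
 * srp_response_common() above grant credits pushed by the target: the
 * request's req_lim_delta is added to the channel's credit count and a
 * SRP_CRED_RSP carrying the same tag is sent back.  A minimal userspace
 * model of that exchange; the struct layouts here are placeholders of mine,
 * not the SRP wire format.
 */
#include <stdio.h>
#include <stdint.h>

struct cred_req { uint64_t tag; int32_t req_lim_delta; };
struct cred_rsp { uint64_t tag; };

/* Apply the credit delta and build the response that echoes the tag. */
static struct cred_rsp process_cred_req(int *req_lim, const struct cred_req *req)
{
	struct cred_rsp rsp = { .tag = req->tag };

	*req_lim += req->req_lim_delta;
	return rsp;
}

int main(void)
{
	int req_lim = 3;
	struct cred_req req = { .tag = 0x1234, .req_lim_delta = 5 };
	struct cred_rsp rsp = process_cred_req(&req_lim, &req);

	printf("req_lim now %d, responding with tag %#llx\n",
	       req_lim, (unsigned long long)rsp.tag);
	return 0;
}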
+ */ +static void srp_tl_err_work(struct work_struct *work) +{ + struct srp_target_port *target; + + target = container_of(work, struct srp_target_port, tl_err_work); + if (target->rport) + srp_start_tl_fail_timers(target->rport); +} + +static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc, + const char *opname) +{ + struct srp_rdma_ch *ch = cq->cq_context; + struct srp_target_port *target = ch->target; + + if (ch->connected && !target->qp_in_error) { + shost_printk(KERN_ERR, target->scsi_host, + PFX "failed %s status %s (%d) for CQE %p\n", + opname, ib_wc_status_msg(wc->status), wc->status, + wc->wr_cqe); + queue_work(system_long_wq, &target->tl_err_work); + } + target->qp_in_error = true; +} + +static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) +{ + struct srp_target_port *target = host_to_target(shost); + struct srp_rport *rport = target->rport; + struct srp_rdma_ch *ch; + struct srp_request *req; + struct srp_iu *iu; + struct srp_cmd *cmd; + struct ib_device *dev; + unsigned long flags; + u32 tag; + u16 idx; + int len, ret; + const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; + + /* + * The SCSI EH thread is the only context from which srp_queuecommand() + * can get invoked for blocked devices (SDEV_BLOCK / + * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by + * locking the rport mutex if invoked from inside the SCSI EH. + */ + if (in_scsi_eh) + mutex_lock(&rport->mutex); + + scmnd->result = srp_chkready(target->rport); + if (unlikely(scmnd->result)) + goto err; + + WARN_ON_ONCE(scmnd->request->tag < 0); + tag = blk_mq_unique_tag(scmnd->request); + ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; + idx = blk_mq_unique_tag_to_tag(tag); + WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", + dev_name(&shost->shost_gendev), tag, idx, + target->req_ring_size); + + spin_lock_irqsave(&ch->lock, flags); + iu = __srp_get_tx_iu(ch, SRP_IU_CMD); + spin_unlock_irqrestore(&ch->lock, flags); + + if (!iu) + goto err; + + req = &ch->req_ring[idx]; + dev = target->srp_host->srp_dev->dev; + ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, + DMA_TO_DEVICE); + + scmnd->host_scribble = (void *) req; + + cmd = iu->buf; + memset(cmd, 0, sizeof *cmd); + + cmd->opcode = SRP_CMD; + int_to_scsilun(scmnd->device->lun, &cmd->lun); + cmd->tag = tag; + memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); + + req->scmnd = scmnd; + req->cmd = iu; + + len = srp_map_data(scmnd, ch, req); + if (len < 0) { + shost_printk(KERN_ERR, target->scsi_host, + PFX "Failed to map data (%d)\n", len); + /* + * If we ran out of memory descriptors (-ENOMEM) because an + * application is queuing many requests with more than + * max_pages_per_mr sg-list elements, tell the SCSI mid-layer + * to reduce queue depth temporarily. + */ + scmnd->result = len == -ENOMEM ? + DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; + goto err_iu; + } + + ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, + DMA_TO_DEVICE); + + if (srp_post_send(ch, iu, len)) { + shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); + scmnd->result = DID_ERROR << 16; + goto err_unmap; + } + + ret = 0; + +unlock_rport: + if (in_scsi_eh) + mutex_unlock(&rport->mutex); + + return ret; + +err_unmap: + srp_unmap_data(scmnd, ch, req); + +err_iu: + srp_put_tx_iu(ch, iu, SRP_IU_CMD); + + /* + * Avoid that the loops that iterate over the request ring can + * encounter a dangling SCSI command pointer. 
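/*
 * Illustrative aside (not part of the patch): srp_queuecommand() above uses
 * blk_mq_unique_tag() and splits the result with blk_mq_unique_tag_to_hwq()
 * and blk_mq_unique_tag_to_tag() to pick the RDMA channel and the slot in
 * that channel's request ring.  The sketch below models that split as a
 * 16-bit hardware-queue index packed above a 16-bit per-queue tag; treat
 * the exact bit split as an assumption of this illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

static unsigned int unique_tag_to_hwq(uint32_t unique) { return unique >> UNIQUE_TAG_BITS; }
static unsigned int unique_tag_to_tag(uint32_t unique) { return unique & UNIQUE_TAG_MASK; }

int main(void)
{
	uint32_t unique = make_unique_tag(2, 17);	/* channel 2, ring slot 17 */

	printf("unique %#x -> ch %u, req idx %u\n", unique,
	       unique_tag_to_hwq(unique), unique_tag_to_tag(unique));
	return 0;
}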
+ */ + req->scmnd = NULL; + +err: + if (scmnd->result) { + scmnd->scsi_done(scmnd); + ret = 0; + } else { + ret = SCSI_MLQUEUE_HOST_BUSY; + } + + goto unlock_rport; +} + +/* + * Note: the resources allocated in this function are freed in + * srp_free_ch_ib(). + */ +static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + int i; + + ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), + GFP_KERNEL); + if (!ch->rx_ring) + goto err_no_ring; + ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), + GFP_KERNEL); + if (!ch->tx_ring) + goto err_no_ring; + + for (i = 0; i < target->queue_size; ++i) { + ch->rx_ring[i] = srp_alloc_iu(target->srp_host, + ch->max_ti_iu_len, + GFP_KERNEL, DMA_FROM_DEVICE); + if (!ch->rx_ring[i]) + goto err; + } + + for (i = 0; i < target->queue_size; ++i) { + ch->tx_ring[i] = srp_alloc_iu(target->srp_host, + target->max_iu_len, + GFP_KERNEL, DMA_TO_DEVICE); + if (!ch->tx_ring[i]) + goto err; + + list_add(&ch->tx_ring[i]->list, &ch->free_tx); + } + + return 0; + +err: + for (i = 0; i < target->queue_size; ++i) { + srp_free_iu(target->srp_host, ch->rx_ring[i]); + srp_free_iu(target->srp_host, ch->tx_ring[i]); + } + + +err_no_ring: + kfree(ch->tx_ring); + ch->tx_ring = NULL; + kfree(ch->rx_ring); + ch->rx_ring = NULL; + + return -ENOMEM; +} + +static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) +{ + uint64_t T_tr_ns, max_compl_time_ms; + uint32_t rq_tmo_jiffies; + + /* + * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, + * table 91), both the QP timeout and the retry count have to be set + * for RC QP's during the RTR to RTS transition. + */ + WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != + (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); + + /* + * Set target->rq_tmo_jiffies to one second more than the largest time + * it can take before an error completion is generated. See also + * C9-140..142 in the IBTA spec for more information about how to + * convert the QP Local ACK Timeout value to nanoseconds. + */ + T_tr_ns = 4096 * (1ULL << qp_attr->timeout); + max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; + do_div(max_compl_time_ms, NSEC_PER_MSEC); + rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); + + return rq_tmo_jiffies; +} + +static void srp_cm_rep_handler(struct ib_cm_id *cm_id, + const struct srp_login_rsp *lrsp, + struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct ib_qp_attr *qp_attr = NULL; + int attr_mask = 0; + int ret = 0; + int i; + + if (lrsp->opcode == SRP_LOGIN_RSP) { + ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); + ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); + + /* + * Reserve credits for task management so we don't + * bounce requests back to the SCSI mid-layer. 
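/*
 * Illustrative aside (not part of the patch): srp_compute_rq_tmo() above
 * turns the QP's local ACK timeout and retry count into a request-queue
 * timeout: T_tr = 4096 ns * 2^timeout, the worst-case completion time is
 * retry_cnt * 4 * T_tr, and one extra second of slack is added before
 * converting to jiffies.  The same arithmetic in standalone C, stopping at
 * milliseconds instead of jiffies:
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

static uint64_t rq_timeout_ms(unsigned int qp_timeout, unsigned int retry_cnt)
{
	uint64_t t_tr_ns = 4096ULL << qp_timeout;	/* local ACK timeout in ns */
	uint64_t max_compl_ms = retry_cnt * 4 * t_tr_ns / NSEC_PER_MSEC;

	return max_compl_ms + 1000;	/* one second of slack, as in the driver */
}

int main(void)
{
	/* Example: timeout encoding 14 (T_tr of roughly 67 ms) and 7 retries. */
	printf("request timeout: %llu ms\n",
	       (unsigned long long)rq_timeout_ms(14, 7));
	return 0;
}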
+ */ + target->scsi_host->can_queue + = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, + target->scsi_host->can_queue); + target->scsi_host->cmd_per_lun + = min_t(int, target->scsi_host->can_queue, + target->scsi_host->cmd_per_lun); + } else { + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); + ret = -ECONNRESET; + goto error; + } + + if (!ch->rx_ring) { + ret = srp_alloc_iu_bufs(ch); + if (ret) + goto error; + } + + for (i = 0; i < target->queue_size; i++) { + struct srp_iu *iu = ch->rx_ring[i]; + + ret = srp_post_recv(ch, iu); + if (ret) + goto error; + } + + if (!target->using_rdma_cm) { + ret = -ENOMEM; + qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL); + if (!qp_attr) + goto error; + + qp_attr->qp_state = IB_QPS_RTR; + ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); + if (ret) + goto error_free; + + ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); + if (ret) + goto error_free; + + qp_attr->qp_state = IB_QPS_RTS; + ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); + if (ret) + goto error_free; + + target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); + + ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); + if (ret) + goto error_free; + + ret = ib_send_cm_rtu(cm_id, NULL, 0); + } + +error_free: + kfree(qp_attr); + +error: + ch->status = ret; +} + +static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event, + struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + struct Scsi_Host *shost = target->scsi_host; + struct ib_class_port_info *cpi; + int opcode; + u16 dlid; + + switch (event->param.rej_rcvd.reason) { + case IB_CM_REJ_PORT_CM_REDIRECT: + cpi = event->param.rej_rcvd.ari; + dlid = be16_to_cpu(cpi->redirect_lid); + sa_path_set_dlid(&ch->ib_cm.path, dlid); + ch->ib_cm.path.pkey = cpi->redirect_pkey; + cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; + memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); + + ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; + break; + + case IB_CM_REJ_PORT_REDIRECT: + if (srp_target_is_topspin(target)) { + union ib_gid *dgid = &ch->ib_cm.path.dgid; + + /* + * Topspin/Cisco SRP gateways incorrectly send + * reject reason code 25 when they mean 24 + * (port redirect). 
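/*
 * Illustrative aside (not part of the patch): srp_cm_rep_handler() above
 * clamps the SCSI host's queue depth to the credits granted in the login
 * response, minus the credits reserved for task management, and then caps
 * cmd_per_lun by the resulting can_queue.  The same clamping in standalone
 * C (the reserve constant stands in for SRP_TSK_MGMT_SQ_SIZE):
 */
#include <stdio.h>

#define TSK_MGMT_RESERVE 1	/* stand-in for SRP_TSK_MGMT_SQ_SIZE */

static int min_int(int a, int b) { return a < b ? a : b; }

static void clamp_queue_depth(int req_lim, int *can_queue, int *cmd_per_lun)
{
	*can_queue = min_int(req_lim - TSK_MGMT_RESERVE, *can_queue);
	*cmd_per_lun = min_int(*can_queue, *cmd_per_lun);
}

int main(void)
{
	int can_queue = 64, cmd_per_lun = 64;

	clamp_queue_depth(32, &can_queue, &cmd_per_lun);	/* target granted 32 credits */
	printf("can_queue=%d cmd_per_lun=%d\n", can_queue, cmd_per_lun);
	return 0;
}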
+ */ + memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); + + shost_printk(KERN_DEBUG, shost, + PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", + be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); + + ch->status = SRP_PORT_REDIRECT; + } else { + shost_printk(KERN_WARNING, shost, + " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); + ch->status = -ECONNRESET; + } + break; + + case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: + shost_printk(KERN_WARNING, shost, + " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); + ch->status = -ECONNRESET; + break; + + case IB_CM_REJ_CONSUMER_DEFINED: + opcode = *(u8 *) event->private_data; + if (opcode == SRP_LOGIN_REJ) { + struct srp_login_rej *rej = event->private_data; + u32 reason = be32_to_cpu(rej->reason); + + if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) + shost_printk(KERN_WARNING, shost, + PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); + else + shost_printk(KERN_WARNING, shost, PFX + "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", + target->sgid.raw, + target->ib_cm.orig_dgid.raw, + reason); + } else + shost_printk(KERN_WARNING, shost, + " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," + " opcode 0x%02x\n", opcode); + ch->status = -ECONNRESET; + break; + + case IB_CM_REJ_STALE_CONN: + shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); + ch->status = SRP_STALE_CONN; + break; + + default: + shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", + event->param.rej_rcvd.reason); + ch->status = -ECONNRESET; + } +} + +static int srp_ib_cm_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event) +{ + struct srp_rdma_ch *ch = cm_id->context; + struct srp_target_port *target = ch->target; + int comp = 0; + + switch (event->event) { + case IB_CM_REQ_ERROR: + shost_printk(KERN_DEBUG, target->scsi_host, + PFX "Sending CM REQ failed\n"); + comp = 1; + ch->status = -ECONNRESET; + break; + + case IB_CM_REP_RECEIVED: + comp = 1; + srp_cm_rep_handler(cm_id, event->private_data, ch); + break; + + case IB_CM_REJ_RECEIVED: + shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); + comp = 1; + + srp_ib_cm_rej_handler(cm_id, event, ch); + break; + + case IB_CM_DREQ_RECEIVED: + shost_printk(KERN_WARNING, target->scsi_host, + PFX "DREQ received - connection closed\n"); + ch->connected = false; + if (ib_send_cm_drep(cm_id, NULL, 0)) + shost_printk(KERN_ERR, target->scsi_host, + PFX "Sending CM DREP failed\n"); + queue_work(system_long_wq, &target->tl_err_work); + break; + + case IB_CM_TIMEWAIT_EXIT: + shost_printk(KERN_ERR, target->scsi_host, + PFX "connection closed\n"); + comp = 1; + + ch->status = 0; + break; + + case IB_CM_MRA_RECEIVED: + case IB_CM_DREQ_ERROR: + case IB_CM_DREP_RECEIVED: + break; + + default: + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Unhandled CM event %d\n", event->event); + break; + } + + if (comp) + complete(&ch->done); + + return 0; +} + +static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch, + struct rdma_cm_event *event) +{ + struct srp_target_port *target = ch->target; + struct Scsi_Host *shost = target->scsi_host; + int opcode; + + switch (event->status) { + case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: + shost_printk(KERN_WARNING, shost, + " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); + ch->status = -ECONNRESET; + break; + + case IB_CM_REJ_CONSUMER_DEFINED: + opcode = *(u8 *) event->param.conn.private_data; + if (opcode == SRP_LOGIN_REJ) { + struct srp_login_rej *rej = + (struct srp_login_rej *) + 
event->param.conn.private_data; + u32 reason = be32_to_cpu(rej->reason); + + if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) + shost_printk(KERN_WARNING, shost, + PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); + else + shost_printk(KERN_WARNING, shost, + PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason); + } else { + shost_printk(KERN_WARNING, shost, + " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n", + opcode); + } + ch->status = -ECONNRESET; + break; + + case IB_CM_REJ_STALE_CONN: + shost_printk(KERN_WARNING, shost, + " REJ reason: stale connection\n"); + ch->status = SRP_STALE_CONN; + break; + + default: + shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", + event->status); + ch->status = -ECONNRESET; + break; + } +} + +static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event) +{ + struct srp_rdma_ch *ch = cm_id->context; + struct srp_target_port *target = ch->target; + int comp = 0; + + switch (event->event) { + case RDMA_CM_EVENT_ADDR_RESOLVED: + ch->status = 0; + comp = 1; + break; + + case RDMA_CM_EVENT_ADDR_ERROR: + ch->status = -ENXIO; + comp = 1; + break; + + case RDMA_CM_EVENT_ROUTE_RESOLVED: + ch->status = 0; + comp = 1; + break; + + case RDMA_CM_EVENT_ROUTE_ERROR: + case RDMA_CM_EVENT_UNREACHABLE: + ch->status = -EHOSTUNREACH; + comp = 1; + break; + + case RDMA_CM_EVENT_CONNECT_ERROR: + shost_printk(KERN_DEBUG, target->scsi_host, + PFX "Sending CM REQ failed\n"); + comp = 1; + ch->status = -ECONNRESET; + break; + + case RDMA_CM_EVENT_ESTABLISHED: + comp = 1; + srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); + break; + + case RDMA_CM_EVENT_REJECTED: + shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); + comp = 1; + + srp_rdma_cm_rej_handler(ch, event); + break; + + case RDMA_CM_EVENT_DISCONNECTED: + if (ch->connected) { + shost_printk(KERN_WARNING, target->scsi_host, + PFX "received DREQ\n"); + rdma_disconnect(ch->rdma_cm.cm_id); + comp = 1; + ch->status = 0; + queue_work(system_long_wq, &target->tl_err_work); + } + break; + + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + shost_printk(KERN_ERR, target->scsi_host, + PFX "connection closed\n"); + + comp = 1; + ch->status = 0; + break; + + default: + shost_printk(KERN_WARNING, target->scsi_host, + PFX "Unhandled CM event %d\n", event->event); + break; + } + + if (comp) + complete(&ch->done); + + return 0; +} + +/** + * srp_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Returns queue depth. + */ +static int +srp_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + if (!sdev->tagged_supported) + qdepth = 1; + return scsi_change_queue_depth(sdev, qdepth); +} + +static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, + u8 func, u8 *status) +{ + struct srp_target_port *target = ch->target; + struct srp_rport *rport = target->rport; + struct ib_device *dev = target->srp_host->srp_dev->dev; + struct srp_iu *iu; + struct srp_tsk_mgmt *tsk_mgmt; + int res; + + if (!ch->connected || target->qp_in_error) + return -1; + + /* + * Lock the rport mutex to avoid that srp_create_ch_ib() is + * invoked while a task management function is being sent. 
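/*
 * Illustrative aside (not part of the patch): srp_send_tsk_mgmt() below
 * generates task management tags by incrementing a per-channel counter and
 * OR-ing in SRP_TAG_TSK_MGMT, and srp_process_rsp() earlier routes any
 * response whose tag has that bit set to the task management completion
 * instead of a SCSI command.  A standalone model of that tagging; the bit
 * position used here is a placeholder, not the driver's actual constant.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TAG_TSK_MGMT (1ULL << 63)	/* placeholder flag bit */

static uint64_t next_tsk_mgmt_tag(uint64_t *counter)
{
	*counter = (*counter + 1) | TAG_TSK_MGMT;
	return *counter;
}

static bool is_tsk_mgmt_tag(uint64_t tag)
{
	return tag & TAG_TSK_MGMT;
}

int main(void)
{
	uint64_t counter = 0;
	uint64_t tag = next_tsk_mgmt_tag(&counter);

	printf("tag %#llx is %sa task mgmt tag\n", (unsigned long long)tag,
	       is_tsk_mgmt_tag(tag) ? "" : "not ");
	printf("tag 0x2a is %sa task mgmt tag\n", is_tsk_mgmt_tag(0x2a) ? "" : "not ");
	return 0;
}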
+ */ + mutex_lock(&rport->mutex); + spin_lock_irq(&ch->lock); + iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); + spin_unlock_irq(&ch->lock); + + if (!iu) { + mutex_unlock(&rport->mutex); + + return -1; + } + + ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, + DMA_TO_DEVICE); + tsk_mgmt = iu->buf; + memset(tsk_mgmt, 0, sizeof *tsk_mgmt); + + tsk_mgmt->opcode = SRP_TSK_MGMT; + int_to_scsilun(lun, &tsk_mgmt->lun); + tsk_mgmt->tsk_mgmt_func = func; + tsk_mgmt->task_tag = req_tag; + + spin_lock_irq(&ch->lock); + ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; + tsk_mgmt->tag = ch->tsk_mgmt_tag; + spin_unlock_irq(&ch->lock); + + init_completion(&ch->tsk_mgmt_done); + + ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, + DMA_TO_DEVICE); + if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { + srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); + mutex_unlock(&rport->mutex); + + return -1; + } + res = wait_for_completion_timeout(&ch->tsk_mgmt_done, + msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)); + if (res > 0 && status) + *status = ch->tsk_mgmt_status; + mutex_unlock(&rport->mutex); + + WARN_ON_ONCE(res < 0); + + return res > 0 ? 0 : -1; +} + +static int srp_abort(struct scsi_cmnd *scmnd) +{ + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_request *req = (struct srp_request *) scmnd->host_scribble; + u32 tag; + u16 ch_idx; + struct srp_rdma_ch *ch; + int ret; + + shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); + + if (!req) + return SUCCESS; + tag = blk_mq_unique_tag(scmnd->request); + ch_idx = blk_mq_unique_tag_to_hwq(tag); + if (WARN_ON_ONCE(ch_idx >= target->ch_count)) + return SUCCESS; + ch = &target->ch[ch_idx]; + if (!srp_claim_req(ch, req, NULL, scmnd)) + return SUCCESS; + shost_printk(KERN_ERR, target->scsi_host, + "Sending SRP abort for tag %#x\n", tag); + if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, + SRP_TSK_ABORT_TASK, NULL) == 0) + ret = SUCCESS; + else if (target->rport->state == SRP_RPORT_LOST) + ret = FAST_IO_FAIL; + else + ret = FAILED; + if (ret == SUCCESS) { + srp_free_req(ch, req, scmnd, 0); + scmnd->result = DID_ABORT << 16; + scmnd->scsi_done(scmnd); + } + + return ret; +} + +static int srp_reset_device(struct scsi_cmnd *scmnd) +{ + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_rdma_ch *ch; + u8 status; + + shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); + + ch = &target->ch[0]; + if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, + SRP_TSK_LUN_RESET, &status)) + return FAILED; + if (status) + return FAILED; + + return SUCCESS; +} + +static int srp_reset_host(struct scsi_cmnd *scmnd) +{ + struct srp_target_port *target = host_to_target(scmnd->device->host); + + shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); + + return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; +} + +static int srp_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct srp_target_port *target = host_to_target(shost); + + if (target->target_can_queue) + starget->can_queue = target->target_can_queue; + return 0; +} + +static int srp_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct srp_target_port *target = host_to_target(shost); + struct srp_device *srp_dev = target->srp_host->srp_dev; + struct ib_device *ibdev = srp_dev->dev; + + if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) + blk_queue_virt_boundary(sdev->request_queue, + ~srp_dev->mr_page_mask); + + return 0; +} + +static int srp_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct srp_target_port *target = host_to_target(shost); + struct request_queue *q = sdev->request_queue; + unsigned long timeout; + + if (sdev->type == TYPE_DISK) { + timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); + blk_queue_rq_timeout(q, timeout); + } + + return 0; +} + +static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); +} + +static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); +} + +static ssize_t show_service_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + if (target->using_rdma_cm) + return -ENOENT; + return sprintf(buf, "0x%016llx\n", + be64_to_cpu(target->ib_cm.service_id)); +} + +static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + if (target->using_rdma_cm) + return -ENOENT; + return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); +} + +static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%pI6\n", target->sgid.raw); +} + +static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + struct srp_rdma_ch *ch = &target->ch[0]; + + if (target->using_rdma_cm) + return -ENOENT; + return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); +} + +static ssize_t show_orig_dgid(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + if (target->using_rdma_cm) + return -ENOENT; + return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); +} + +static ssize_t show_req_lim(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + struct srp_rdma_ch *ch; + int i, req_lim = INT_MAX; + + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + req_lim = min(req_lim, ch->req_lim); + } + return sprintf(buf, "%d\n", req_lim); +} + +static ssize_t show_zero_req_lim(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = 
host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->zero_req_lim); +} + +static ssize_t show_local_ib_port(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->srp_host->port); +} + +static ssize_t show_local_ib_device(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); +} + +static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->ch_count); +} + +static ssize_t show_comp_vector(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->comp_vector); +} + +static ssize_t show_tl_retry_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->tl_retry_count); +} + +static ssize_t show_cmd_sg_entries(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%u\n", target->cmd_sg_cnt); +} + +static ssize_t show_allow_ext_sg(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); +} + +static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); +static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); +static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); +static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); +static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); +static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); +static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); +static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); +static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); +static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); +static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); +static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL); +static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); +static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); +static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); +static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); + +static struct device_attribute *srp_host_attrs[] = { + &dev_attr_id_ext, + &dev_attr_ioc_guid, + &dev_attr_service_id, + &dev_attr_pkey, + &dev_attr_sgid, + &dev_attr_dgid, + &dev_attr_orig_dgid, + &dev_attr_req_lim, + &dev_attr_zero_req_lim, + &dev_attr_local_ib_port, + &dev_attr_local_ib_device, + &dev_attr_ch_count, + &dev_attr_comp_vector, + &dev_attr_tl_retry_count, + &dev_attr_cmd_sg_entries, + &dev_attr_allow_ext_sg, + NULL +}; + +static struct scsi_host_template srp_template = { + .module = THIS_MODULE, + .name = "InfiniBand SRP initiator", + .proc_name = DRV_NAME, + .target_alloc = srp_target_alloc, + .slave_alloc = srp_slave_alloc, + .slave_configure = srp_slave_configure, + .info = srp_target_info, + .queuecommand = srp_queuecommand, + 
.change_queue_depth = srp_change_queue_depth, + .eh_timed_out = srp_timed_out, + .eh_abort_handler = srp_abort, + .eh_device_reset_handler = srp_reset_device, + .eh_host_reset_handler = srp_reset_host, + .skip_settle_delay = true, + .sg_tablesize = SRP_DEF_SG_TABLESIZE, + .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, + .this_id = -1, + .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = srp_host_attrs, + .track_queue_depth = 1, +}; + +static int srp_sdev_count(struct Scsi_Host *host) +{ + struct scsi_device *sdev; + int c = 0; + + shost_for_each_device(sdev, host) + c++; + + return c; +} + +/* + * Return values: + * < 0 upon failure. Caller is responsible for SRP target port cleanup. + * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port + * removal has been scheduled. + * 0 and target->state != SRP_TARGET_REMOVED upon success. + */ +static int srp_add_target(struct srp_host *host, struct srp_target_port *target) +{ + struct srp_rport_identifiers ids; + struct srp_rport *rport; + + target->state = SRP_TARGET_SCANNING; + sprintf(target->target_name, "SRP.T10:%016llX", + be64_to_cpu(target->id_ext)); + + if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) + return -ENODEV; + + memcpy(ids.port_id, &target->id_ext, 8); + memcpy(ids.port_id + 8, &target->ioc_guid, 8); + ids.roles = SRP_RPORT_ROLE_TARGET; + rport = srp_rport_add(target->scsi_host, &ids); + if (IS_ERR(rport)) { + scsi_remove_host(target->scsi_host); + return PTR_ERR(rport); + } + + rport->lld_data = target; + target->rport = rport; + + spin_lock(&host->target_lock); + list_add_tail(&target->list, &host->target_list); + spin_unlock(&host->target_lock); + + scsi_scan_target(&target->scsi_host->shost_gendev, + 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); + + if (srp_connected_ch(target) < target->ch_count || + target->qp_in_error) { + shost_printk(KERN_INFO, target->scsi_host, + PFX "SCSI scan failed - removing SCSI host\n"); + srp_queue_remove_work(target); + goto out; + } + + pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n", + dev_name(&target->scsi_host->shost_gendev), + srp_sdev_count(target->scsi_host)); + + spin_lock_irq(&target->lock); + if (target->state == SRP_TARGET_SCANNING) + target->state = SRP_TARGET_LIVE; + spin_unlock_irq(&target->lock); + +out: + return 0; +} + +static void srp_release_dev(struct device *dev) +{ + struct srp_host *host = + container_of(dev, struct srp_host, dev); + + complete(&host->released); +} + +static struct class srp_class = { + .name = "infiniband_srp", + .dev_release = srp_release_dev +}; + +/** + * srp_conn_unique() - check whether the connection to a target is unique + * @host: SRP host. + * @target: SRP target port. 
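+ *
+ * Return: true if no other target port on @host uses the same
+ * (id_ext, ioc_guid, initiator_ext) triple as @target; false if such a
+ * port exists or if @target has already been removed.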
+ */ +static bool srp_conn_unique(struct srp_host *host, + struct srp_target_port *target) +{ + struct srp_target_port *t; + bool ret = false; + + if (target->state == SRP_TARGET_REMOVED) + goto out; + + ret = true; + + spin_lock(&host->target_lock); + list_for_each_entry(t, &host->target_list, list) { + if (t != target && + target->id_ext == t->id_ext && + target->ioc_guid == t->ioc_guid && + target->initiator_ext == t->initiator_ext) { + ret = false; + break; + } + } + spin_unlock(&host->target_lock); + +out: + return ret; +} + +/* + * Target ports are added by writing + * + * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, + * pkey=<P_Key>,service_id=<service ID> + * or + * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>, + * [src=<IPv4 address>,]dest=<IPv4 address>:<port number> + * + * to the add_target sysfs attribute. + */ +enum { + SRP_OPT_ERR = 0, + SRP_OPT_ID_EXT = 1 << 0, + SRP_OPT_IOC_GUID = 1 << 1, + SRP_OPT_DGID = 1 << 2, + SRP_OPT_PKEY = 1 << 3, + SRP_OPT_SERVICE_ID = 1 << 4, + SRP_OPT_MAX_SECT = 1 << 5, + SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, + SRP_OPT_IO_CLASS = 1 << 7, + SRP_OPT_INITIATOR_EXT = 1 << 8, + SRP_OPT_CMD_SG_ENTRIES = 1 << 9, + SRP_OPT_ALLOW_EXT_SG = 1 << 10, + SRP_OPT_SG_TABLESIZE = 1 << 11, + SRP_OPT_COMP_VECTOR = 1 << 12, + SRP_OPT_TL_RETRY_COUNT = 1 << 13, + SRP_OPT_QUEUE_SIZE = 1 << 14, + SRP_OPT_IP_SRC = 1 << 15, + SRP_OPT_IP_DEST = 1 << 16, + SRP_OPT_TARGET_CAN_QUEUE= 1 << 17, +}; + +static unsigned int srp_opt_mandatory[] = { + SRP_OPT_ID_EXT | + SRP_OPT_IOC_GUID | + SRP_OPT_DGID | + SRP_OPT_PKEY | + SRP_OPT_SERVICE_ID, + SRP_OPT_ID_EXT | + SRP_OPT_IOC_GUID | + SRP_OPT_IP_DEST, +}; + +static const match_table_t srp_opt_tokens = { + { SRP_OPT_ID_EXT, "id_ext=%s" }, + { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, + { SRP_OPT_DGID, "dgid=%s" }, + { SRP_OPT_PKEY, "pkey=%x" }, + { SRP_OPT_SERVICE_ID, "service_id=%s" }, + { SRP_OPT_MAX_SECT, "max_sect=%d" }, + { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, + { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" }, + { SRP_OPT_IO_CLASS, "io_class=%x" }, + { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, + { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, + { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, + { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, + { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, + { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, + { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, + { SRP_OPT_IP_SRC, "src=%s" }, + { SRP_OPT_IP_DEST, "dest=%s" }, + { SRP_OPT_ERR, NULL } +}; + +/** + * srp_parse_in - parse an IP address and port number combination + * @net: [in] Network namespace. + * @sa: [out] Address family, IP address and port number. + * @addr_port_str: [in] IP address and port number. + * @has_port: [out] Whether or not @addr_port_str includes a port number. + * + * Parse the following address formats: + * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5. + * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5. 
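+ *
+ * If @has_port is not NULL, *@has_port is set to whether @addr_port_str
+ * contained a port number.
+ *
+ * Return: 0 if parsing succeeded, a negative error code otherwise.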
+ */ +static int srp_parse_in(struct net *net, struct sockaddr_storage *sa, + const char *addr_port_str, bool *has_port) +{ + char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL); + char *port_str; + int ret; + + if (!addr) + return -ENOMEM; + port_str = strrchr(addr, ':'); + if (port_str && strchr(port_str, ']')) + port_str = NULL; + if (port_str) + *port_str++ = '\0'; + if (has_port) + *has_port = port_str != NULL; + ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa); + if (ret && addr[0]) { + addr_end = addr + strlen(addr) - 1; + if (addr[0] == '[' && *addr_end == ']') { + *addr_end = '\0'; + ret = inet_pton_with_scope(net, AF_INET6, addr + 1, + port_str, sa); + } + } + kfree(addr); + pr_debug("%s -> %pISpfsc\n", addr_port_str, sa); + return ret; +} + +static int srp_parse_options(struct net *net, const char *buf, + struct srp_target_port *target) +{ + char *options, *sep_opt; + char *p; + substring_t args[MAX_OPT_ARGS]; + unsigned long long ull; + bool has_port; + int opt_mask = 0; + int token; + int ret = -EINVAL; + int i; + + options = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + sep_opt = options; + while ((p = strsep(&sep_opt, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, srp_opt_tokens, args); + opt_mask |= token; + + switch (token) { + case SRP_OPT_ID_EXT: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = kstrtoull(p, 16, &ull); + if (ret) { + pr_warn("invalid id_ext parameter '%s'\n", p); + kfree(p); + goto out; + } + target->id_ext = cpu_to_be64(ull); + kfree(p); + break; + + case SRP_OPT_IOC_GUID: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = kstrtoull(p, 16, &ull); + if (ret) { + pr_warn("invalid ioc_guid parameter '%s'\n", p); + kfree(p); + goto out; + } + target->ioc_guid = cpu_to_be64(ull); + kfree(p); + break; + + case SRP_OPT_DGID: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + if (strlen(p) != 32) { + pr_warn("bad dest GID parameter '%s'\n", p); + kfree(p); + goto out; + } + + ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16); + kfree(p); + if (ret < 0) + goto out; + break; + + case SRP_OPT_PKEY: + if (match_hex(args, &token)) { + pr_warn("bad P_Key parameter '%s'\n", p); + goto out; + } + target->ib_cm.pkey = cpu_to_be16(token); + break; + + case SRP_OPT_SERVICE_ID: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = kstrtoull(p, 16, &ull); + if (ret) { + pr_warn("bad service_id parameter '%s'\n", p); + kfree(p); + goto out; + } + target->ib_cm.service_id = cpu_to_be64(ull); + kfree(p); + break; + + case SRP_OPT_IP_SRC: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, + NULL); + if (ret < 0) { + pr_warn("bad source parameter '%s'\n", p); + kfree(p); + goto out; + } + target->rdma_cm.src_specified = true; + kfree(p); + break; + + case SRP_OPT_IP_DEST: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p, + &has_port); + if (!has_port) + ret = -EINVAL; + if (ret < 0) { + pr_warn("bad dest parameter '%s'\n", p); + kfree(p); + goto out; + } + target->using_rdma_cm = true; + kfree(p); + break; + + case SRP_OPT_MAX_SECT: + if (match_int(args, &token)) { + pr_warn("bad max sect parameter '%s'\n", p); + goto out; + } + target->scsi_host->max_sectors = token; + break; + + case SRP_OPT_QUEUE_SIZE: + if (match_int(args, &token) || token < 1) { + pr_warn("bad 
queue_size parameter '%s'\n", p); + goto out; + } + target->scsi_host->can_queue = token; + target->queue_size = token + SRP_RSP_SQ_SIZE + + SRP_TSK_MGMT_SQ_SIZE; + if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) + target->scsi_host->cmd_per_lun = token; + break; + + case SRP_OPT_MAX_CMD_PER_LUN: + if (match_int(args, &token) || token < 1) { + pr_warn("bad max cmd_per_lun parameter '%s'\n", + p); + goto out; + } + target->scsi_host->cmd_per_lun = token; + break; + + case SRP_OPT_TARGET_CAN_QUEUE: + if (match_int(args, &token) || token < 1) { + pr_warn("bad max target_can_queue parameter '%s'\n", + p); + goto out; + } + target->target_can_queue = token; + break; + + case SRP_OPT_IO_CLASS: + if (match_hex(args, &token)) { + pr_warn("bad IO class parameter '%s'\n", p); + goto out; + } + if (token != SRP_REV10_IB_IO_CLASS && + token != SRP_REV16A_IB_IO_CLASS) { + pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", + token, SRP_REV10_IB_IO_CLASS, + SRP_REV16A_IB_IO_CLASS); + goto out; + } + target->io_class = token; + break; + + case SRP_OPT_INITIATOR_EXT: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + ret = kstrtoull(p, 16, &ull); + if (ret) { + pr_warn("bad initiator_ext value '%s'\n", p); + kfree(p); + goto out; + } + target->initiator_ext = cpu_to_be64(ull); + kfree(p); + break; + + case SRP_OPT_CMD_SG_ENTRIES: + if (match_int(args, &token) || token < 1 || token > 255) { + pr_warn("bad max cmd_sg_entries parameter '%s'\n", + p); + goto out; + } + target->cmd_sg_cnt = token; + break; + + case SRP_OPT_ALLOW_EXT_SG: + if (match_int(args, &token)) { + pr_warn("bad allow_ext_sg parameter '%s'\n", p); + goto out; + } + target->allow_ext_sg = !!token; + break; + + case SRP_OPT_SG_TABLESIZE: + if (match_int(args, &token) || token < 1 || + token > SG_MAX_SEGMENTS) { + pr_warn("bad max sg_tablesize parameter '%s'\n", + p); + goto out; + } + target->sg_tablesize = token; + break; + + case SRP_OPT_COMP_VECTOR: + if (match_int(args, &token) || token < 0) { + pr_warn("bad comp_vector parameter '%s'\n", p); + goto out; + } + target->comp_vector = token; + break; + + case SRP_OPT_TL_RETRY_COUNT: + if (match_int(args, &token) || token < 2 || token > 7) { + pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", + p); + goto out; + } + target->tl_retry_count = token; + break; + + default: + pr_warn("unknown parameter or missing value '%s' in target creation request\n", + p); + goto out; + } + } + + for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) { + if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) { + ret = 0; + break; + } + } + if (ret) + pr_warn("target creation request is missing one or more parameters\n"); + + if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue + && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) + pr_warn("cmd_per_lun = %d > queue_size = %d\n", + target->scsi_host->cmd_per_lun, + target->scsi_host->can_queue); + +out: + kfree(options); + return ret; +} + +static ssize_t srp_create_target(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_host *host = + container_of(dev, struct srp_host, dev); + struct Scsi_Host *target_host; + struct srp_target_port *target; + struct srp_rdma_ch *ch; + struct srp_device *srp_dev = host->srp_dev; + struct ib_device *ibdev = srp_dev->dev; + int ret, node_idx, node, cpu, i; + unsigned int max_sectors_per_mr, mr_per_cmd = 0; + bool multich = false; + + target_host = scsi_host_alloc(&srp_template, + sizeof (struct 
srp_target_port)); + if (!target_host) + return -ENOMEM; + + target_host->transportt = ib_srp_transport_template; + target_host->max_channel = 0; + target_host->max_id = 1; + target_host->max_lun = -1LL; + target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; + + target = host_to_target(target_host); + + target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); + target->io_class = SRP_REV16A_IB_IO_CLASS; + target->scsi_host = target_host; + target->srp_host = host; + target->lkey = host->srp_dev->pd->local_dma_lkey; + target->global_rkey = host->srp_dev->global_rkey; + target->cmd_sg_cnt = cmd_sg_entries; + target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; + target->allow_ext_sg = allow_ext_sg; + target->tl_retry_count = 7; + target->queue_size = SRP_DEFAULT_QUEUE_SIZE; + + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before this function returns. + */ + scsi_host_get(target->scsi_host); + + ret = mutex_lock_interruptible(&host->add_target_mutex); + if (ret < 0) + goto put; + + ret = srp_parse_options(target->net, buf, target); + if (ret) + goto out; + + target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; + + if (!srp_conn_unique(target->srp_host, target)) { + if (target->using_rdma_cm) { + shost_printk(KERN_INFO, target->scsi_host, + PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n", + be64_to_cpu(target->id_ext), + be64_to_cpu(target->ioc_guid), + &target->rdma_cm.dst); + } else { + shost_printk(KERN_INFO, target->scsi_host, + PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", + be64_to_cpu(target->id_ext), + be64_to_cpu(target->ioc_guid), + be64_to_cpu(target->initiator_ext)); + } + ret = -EEXIST; + goto out; + } + + if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && + target->cmd_sg_cnt < target->sg_tablesize) { + pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); + target->sg_tablesize = target->cmd_sg_cnt; + } + + if (srp_dev->use_fast_reg || srp_dev->use_fmr) { + bool gaps_reg = (ibdev->attrs.device_cap_flags & + IB_DEVICE_SG_GAPS_REG); + + max_sectors_per_mr = srp_dev->max_pages_per_mr << + (ilog2(srp_dev->mr_page_size) - 9); + if (!gaps_reg) { + /* + * FR and FMR can only map one HCA page per entry. If + * the start address is not aligned on a HCA page + * boundary two entries will be used for the head and + * the tail although these two entries combined + * contain at most one HCA page of data. Hence the "+ + * 1" in the calculation below. + * + * The indirect data buffer descriptor is contiguous + * so the memory for that buffer will only be + * registered if register_always is true. Hence add + * one to mr_per_cmd if register_always has been set. 
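+ *
+ * Illustrative example (hypothetical values, not defaults read from
+ * this code): with max_sectors_per_mr = 4096, max_sectors = 1024 and
+ * register_always enabled,
+ * mr_per_cmd = 1 + (1024 + 1 + 4096 - 1) / 4096 = 2.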
+ */ + mr_per_cmd = register_always + + (target->scsi_host->max_sectors + 1 + + max_sectors_per_mr - 1) / max_sectors_per_mr; + } else { + mr_per_cmd = register_always + + (target->sg_tablesize + + srp_dev->max_pages_per_mr - 1) / + srp_dev->max_pages_per_mr; + } + pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n", + target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size, + max_sectors_per_mr, mr_per_cmd); + } + + target_host->sg_tablesize = target->sg_tablesize; + target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd; + target->mr_per_cmd = mr_per_cmd; + target->indirect_size = target->sg_tablesize * + sizeof (struct srp_direct_buf); + target->max_iu_len = sizeof (struct srp_cmd) + + sizeof (struct srp_indirect_buf) + + target->cmd_sg_cnt * sizeof (struct srp_direct_buf); + + INIT_WORK(&target->tl_err_work, srp_tl_err_work); + INIT_WORK(&target->remove_work, srp_remove_work); + spin_lock_init(&target->lock); + ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid); + if (ret) + goto out; + + ret = -ENOMEM; + target->ch_count = max_t(unsigned, num_online_nodes(), + min(ch_count ? : + min(4 * num_online_nodes(), + ibdev->num_comp_vectors), + num_online_cpus())); + target->ch = kcalloc(target->ch_count, sizeof(*target->ch), + GFP_KERNEL); + if (!target->ch) + goto out; + + node_idx = 0; + for_each_online_node(node) { + const int ch_start = (node_idx * target->ch_count / + num_online_nodes()); + const int ch_end = ((node_idx + 1) * target->ch_count / + num_online_nodes()); + const int cv_start = node_idx * ibdev->num_comp_vectors / + num_online_nodes(); + const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors / + num_online_nodes(); + int cpu_idx = 0; + + for_each_online_cpu(cpu) { + if (cpu_to_node(cpu) != node) + continue; + if (ch_start + cpu_idx >= ch_end) + continue; + ch = &target->ch[ch_start + cpu_idx]; + ch->target = target; + ch->comp_vector = cv_start == cv_end ? 
cv_start : + cv_start + cpu_idx % (cv_end - cv_start); + spin_lock_init(&ch->lock); + INIT_LIST_HEAD(&ch->free_tx); + ret = srp_new_cm_id(ch); + if (ret) + goto err_disconnect; + + ret = srp_create_ch_ib(ch); + if (ret) + goto err_disconnect; + + ret = srp_alloc_req_data(ch); + if (ret) + goto err_disconnect; + + ret = srp_connect_ch(ch, multich); + if (ret) { + char dst[64]; + + if (target->using_rdma_cm) + snprintf(dst, sizeof(dst), "%pIS", + &target->rdma_cm.dst); + else + snprintf(dst, sizeof(dst), "%pI6", + target->ib_cm.orig_dgid.raw); + shost_printk(KERN_ERR, target->scsi_host, + PFX "Connection %d/%d to %s failed\n", + ch_start + cpu_idx, + target->ch_count, dst); + if (node_idx == 0 && cpu_idx == 0) { + goto free_ch; + } else { + srp_free_ch_ib(target, ch); + srp_free_req_data(target, ch); + target->ch_count = ch - target->ch; + goto connected; + } + } + + multich = true; + cpu_idx++; + } + node_idx++; + } + +connected: + target->scsi_host->nr_hw_queues = target->ch_count; + + ret = srp_add_target(host, target); + if (ret) + goto err_disconnect; + + if (target->state != SRP_TARGET_REMOVED) { + if (target->using_rdma_cm) { + shost_printk(KERN_DEBUG, target->scsi_host, PFX + "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n", + be64_to_cpu(target->id_ext), + be64_to_cpu(target->ioc_guid), + target->sgid.raw, &target->rdma_cm.dst); + } else { + shost_printk(KERN_DEBUG, target->scsi_host, PFX + "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", + be64_to_cpu(target->id_ext), + be64_to_cpu(target->ioc_guid), + be16_to_cpu(target->ib_cm.pkey), + be64_to_cpu(target->ib_cm.service_id), + target->sgid.raw, + target->ib_cm.orig_dgid.raw); + } + } + + ret = count; + +out: + mutex_unlock(&host->add_target_mutex); + +put: + scsi_host_put(target->scsi_host); + if (ret < 0) { + /* + * If a call to srp_remove_target() has not been scheduled, + * drop the network namespace reference now that was obtained + * earlier in this function. 
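+ * (The reference was obtained via kobj_ns_grab_current() near the
+ * top of this function.)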
+ */ + if (target->state != SRP_TARGET_REMOVED) + kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); + scsi_host_put(target->scsi_host); + } + + return ret; + +err_disconnect: + srp_disconnect_target(target); + +free_ch: + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + srp_free_ch_ib(target, ch); + srp_free_req_data(target, ch); + } + + kfree(target->ch); + goto out; +} + +static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); + +static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_host *host = container_of(dev, struct srp_host, dev); + + return sprintf(buf, "%s\n", host->srp_dev->dev->name); +} + +static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); + +static ssize_t show_port(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_host *host = container_of(dev, struct srp_host, dev); + + return sprintf(buf, "%d\n", host->port); +} + +static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); + +static struct srp_host *srp_add_port(struct srp_device *device, u8 port) +{ + struct srp_host *host; + + host = kzalloc(sizeof *host, GFP_KERNEL); + if (!host) + return NULL; + + INIT_LIST_HEAD(&host->target_list); + spin_lock_init(&host->target_lock); + init_completion(&host->released); + mutex_init(&host->add_target_mutex); + host->srp_dev = device; + host->port = port; + + host->dev.class = &srp_class; + host->dev.parent = device->dev->dev.parent; + dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); + + if (device_register(&host->dev)) + goto free_host; + if (device_create_file(&host->dev, &dev_attr_add_target)) + goto err_class; + if (device_create_file(&host->dev, &dev_attr_ibdev)) + goto err_class; + if (device_create_file(&host->dev, &dev_attr_port)) + goto err_class; + + return host; + +err_class: + device_unregister(&host->dev); + +free_host: + kfree(host); + + return NULL; +} + +static void srp_add_one(struct ib_device *device) +{ + struct srp_device *srp_dev; + struct ib_device_attr *attr = &device->attrs; + struct srp_host *host; + int mr_page_shift, p; + u64 max_pages_per_mr; + unsigned int flags = 0; + + srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); + if (!srp_dev) + return; + + /* + * Use the smallest page size supported by the HCA, down to a + * minimum of 4096 bytes. We're unlikely to build large sglists + * out of smaller entries. 
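+ *
+ * Illustrative example (hypothetical capability mask): for a HCA
+ * reporting page_size_cap = 0xfffff000, the smallest supported page is
+ * 4 KiB, so ffs(page_size_cap) - 1 = 12, mr_page_size = 1 << 12 = 4096
+ * and mr_page_mask = ~0xfffULL.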
+ */ + mr_page_shift = max(12, ffs(attr->page_size_cap) - 1); + srp_dev->mr_page_size = 1 << mr_page_shift; + srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); + max_pages_per_mr = attr->max_mr_size; + do_div(max_pages_per_mr, srp_dev->mr_page_size); + pr_debug("%s: %llu / %u = %llu <> %u\n", __func__, + attr->max_mr_size, srp_dev->mr_page_size, + max_pages_per_mr, SRP_MAX_PAGES_PER_MR); + srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, + max_pages_per_mr); + + srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr && + device->map_phys_fmr && device->unmap_fmr); + srp_dev->has_fr = (attr->device_cap_flags & + IB_DEVICE_MEM_MGT_EXTENSIONS); + if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) { + dev_warn(&device->dev, "neither FMR nor FR is supported\n"); + } else if (!never_register && + attr->max_mr_size >= 2 * srp_dev->mr_page_size) { + srp_dev->use_fast_reg = (srp_dev->has_fr && + (!srp_dev->has_fmr || prefer_fr)); + srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr; + } + + if (never_register || !register_always || + (!srp_dev->has_fmr && !srp_dev->has_fr)) + flags |= IB_PD_UNSAFE_GLOBAL_RKEY; + + if (srp_dev->use_fast_reg) { + srp_dev->max_pages_per_mr = + min_t(u32, srp_dev->max_pages_per_mr, + attr->max_fast_reg_page_list_len); + } + srp_dev->mr_max_size = srp_dev->mr_page_size * + srp_dev->max_pages_per_mr; + pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n", + device->name, mr_page_shift, attr->max_mr_size, + attr->max_fast_reg_page_list_len, + srp_dev->max_pages_per_mr, srp_dev->mr_max_size); + + INIT_LIST_HEAD(&srp_dev->dev_list); + + srp_dev->dev = device; + srp_dev->pd = ib_alloc_pd(device, flags); + if (IS_ERR(srp_dev->pd)) + goto free_dev; + + if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { + srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey; + WARN_ON_ONCE(srp_dev->global_rkey == 0); + } + + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { + host = srp_add_port(srp_dev, p); + if (host) + list_add_tail(&host->list, &srp_dev->dev_list); + } + + ib_set_client_data(device, &srp_client, srp_dev); + return; + +free_dev: + kfree(srp_dev); +} + +static void srp_remove_one(struct ib_device *device, void *client_data) +{ + struct srp_device *srp_dev; + struct srp_host *host, *tmp_host; + struct srp_target_port *target; + + srp_dev = client_data; + if (!srp_dev) + return; + + list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { + device_unregister(&host->dev); + /* + * Wait for the sysfs entry to go away, so that no new + * target ports can be created. + */ + wait_for_completion(&host->released); + + /* + * Remove all target ports. + */ + spin_lock(&host->target_lock); + list_for_each_entry(target, &host->target_list, list) + srp_queue_remove_work(target); + spin_unlock(&host->target_lock); + + /* + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. 
+ */ + flush_workqueue(srp_remove_wq); + + kfree(host); + } + + ib_dealloc_pd(srp_dev->pd); + + kfree(srp_dev); +} + +static struct srp_function_template ib_srp_transport_functions = { + .has_rport_state = true, + .reset_timer_if_blocked = true, + .reconnect_delay = &srp_reconnect_delay, + .fast_io_fail_tmo = &srp_fast_io_fail_tmo, + .dev_loss_tmo = &srp_dev_loss_tmo, + .reconnect = srp_rport_reconnect, + .rport_delete = srp_rport_delete, + .terminate_rport_io = srp_terminate_io, +}; + +static int __init srp_init_module(void) +{ + int ret; + + if (srp_sg_tablesize) { + pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n"); + if (!cmd_sg_entries) + cmd_sg_entries = srp_sg_tablesize; + } + + if (!cmd_sg_entries) + cmd_sg_entries = SRP_DEF_SG_TABLESIZE; + + if (cmd_sg_entries > 255) { + pr_warn("Clamping cmd_sg_entries to 255\n"); + cmd_sg_entries = 255; + } + + if (!indirect_sg_entries) + indirect_sg_entries = cmd_sg_entries; + else if (indirect_sg_entries < cmd_sg_entries) { + pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", + cmd_sg_entries); + indirect_sg_entries = cmd_sg_entries; + } + + if (indirect_sg_entries > SG_MAX_SEGMENTS) { + pr_warn("Clamping indirect_sg_entries to %u\n", + SG_MAX_SEGMENTS); + indirect_sg_entries = SG_MAX_SEGMENTS; + } + + srp_remove_wq = create_workqueue("srp_remove"); + if (!srp_remove_wq) { + ret = -ENOMEM; + goto out; + } + + ret = -ENOMEM; + ib_srp_transport_template = + srp_attach_transport(&ib_srp_transport_functions); + if (!ib_srp_transport_template) + goto destroy_wq; + + ret = class_register(&srp_class); + if (ret) { + pr_err("couldn't register class infiniband_srp\n"); + goto release_tr; + } + + ib_sa_register_client(&srp_sa_client); + + ret = ib_register_client(&srp_client); + if (ret) { + pr_err("couldn't register IB client\n"); + goto unreg_sa; + } + +out: + return ret; + +unreg_sa: + ib_sa_unregister_client(&srp_sa_client); + class_unregister(&srp_class); + +release_tr: + srp_release_transport(ib_srp_transport_template); + +destroy_wq: + destroy_workqueue(srp_remove_wq); + goto out; +} + +static void __exit srp_cleanup_module(void) +{ + ib_unregister_client(&srp_client); + ib_sa_unregister_client(&srp_sa_client); + class_unregister(&srp_class); + srp_release_transport(ib_srp_transport_template); + destroy_workqueue(srp_remove_wq); +} + +module_init(srp_init_module); +module_exit(srp_cleanup_module); diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h new file mode 100644 index 000000000..a2706086b --- /dev/null +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef IB_SRP_H +#define IB_SRP_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> + +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_sa.h> +#include <rdma/ib_cm.h> +#include <rdma/ib_fmr_pool.h> +#include <rdma/rdma_cm.h> + +enum { + SRP_PATH_REC_TIMEOUT_MS = 1000, + SRP_ABORT_TIMEOUT_MS = 5000, + + SRP_PORT_REDIRECT = 1, + SRP_DLID_REDIRECT = 2, + SRP_STALE_CONN = 3, + + SRP_DEF_SG_TABLESIZE = 12, + + SRP_DEFAULT_QUEUE_SIZE = 1 << 6, + SRP_RSP_SQ_SIZE = 1, + SRP_TSK_MGMT_SQ_SIZE = 1, + SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE - + SRP_TSK_MGMT_SQ_SIZE, + + SRP_TAG_NO_REQ = ~0U, + SRP_TAG_TSK_MGMT = 1U << 31, + + SRP_MAX_PAGES_PER_MR = 512, +}; + +enum srp_target_state { + SRP_TARGET_SCANNING, + SRP_TARGET_LIVE, + SRP_TARGET_REMOVED, +}; + +enum srp_iu_type { + SRP_IU_CMD, + SRP_IU_TSK_MGMT, + SRP_IU_RSP, +}; + +/* + * @mr_page_mask: HCA memory registration page mask. + * @mr_page_size: HCA memory registration page size. + * @mr_max_size: Maximum size in bytes of a single FMR / FR registration + * request. + */ +struct srp_device { + struct list_head dev_list; + struct ib_device *dev; + struct ib_pd *pd; + u32 global_rkey; + u64 mr_page_mask; + int mr_page_size; + int mr_max_size; + int max_pages_per_mr; + bool has_fmr; + bool has_fr; + bool use_fmr; + bool use_fast_reg; +}; + +struct srp_host { + struct srp_device *srp_dev; + u8 port; + struct device dev; + struct list_head target_list; + spinlock_t target_lock; + struct completion released; + struct list_head list; + struct mutex add_target_mutex; +}; + +struct srp_request { + struct scsi_cmnd *scmnd; + struct srp_iu *cmd; + union { + struct ib_pool_fmr **fmr_list; + struct srp_fr_desc **fr_list; + }; + u64 *map_page; + struct srp_direct_buf *indirect_desc; + dma_addr_t indirect_dma_addr; + short nmdesc; + struct ib_cqe reg_cqe; +}; + +/** + * struct srp_rdma_ch + * @comp_vector: Completion vector used by this RDMA channel. + */ +struct srp_rdma_ch { + /* These are RW in the hot path, and commonly used together */ + struct list_head free_tx; + spinlock_t lock; + s32 req_lim; + + /* These are read-only in the hot path */ + struct srp_target_port *target ____cacheline_aligned_in_smp; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + struct ib_qp *qp; + union { + struct ib_fmr_pool *fmr_pool; + struct srp_fr_pool *fr_pool; + }; + + /* Everything above this point is used in the hot path of + * command processing. Try to keep them packed into cachelines. 
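+ *
+ * The ____cacheline_aligned_in_smp annotation on @target above starts
+ * the read-mostly members on a new cacheline on SMP builds, so that
+ * they do not share a cacheline with the frequently written members.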
+ */ + + struct completion done; + int status; + + union { + struct ib_cm { + struct sa_path_rec path; + struct ib_sa_query *path_query; + int path_query_id; + struct ib_cm_id *cm_id; + } ib_cm; + struct rdma_cm { + struct rdma_cm_id *cm_id; + } rdma_cm; + }; + + struct srp_iu **tx_ring; + struct srp_iu **rx_ring; + struct srp_request *req_ring; + int max_ti_iu_len; + int comp_vector; + + u64 tsk_mgmt_tag; + struct completion tsk_mgmt_done; + u8 tsk_mgmt_status; + bool connected; +}; + +/** + * struct srp_target_port + * @comp_vector: Completion vector used by the first RDMA channel created for + * this target port. + */ +struct srp_target_port { + /* read and written in the hot path */ + spinlock_t lock; + + /* read only in the hot path */ + u32 global_rkey; + struct srp_rdma_ch *ch; + struct net *net; + u32 ch_count; + u32 lkey; + enum srp_target_state state; + unsigned int max_iu_len; + unsigned int cmd_sg_cnt; + unsigned int indirect_size; + bool allow_ext_sg; + + /* other member variables */ + union ib_gid sgid; + __be64 id_ext; + __be64 ioc_guid; + __be64 initiator_ext; + u16 io_class; + struct srp_host *srp_host; + struct Scsi_Host *scsi_host; + struct srp_rport *rport; + char target_name[32]; + unsigned int scsi_id; + unsigned int sg_tablesize; + unsigned int target_can_queue; + int mr_pool_size; + int mr_per_cmd; + int queue_size; + int req_ring_size; + int comp_vector; + int tl_retry_count; + + bool using_rdma_cm; + + union { + struct { + __be64 service_id; + union ib_gid orig_dgid; + __be16 pkey; + } ib_cm; + struct { + union { + struct sockaddr_in ip4; + struct sockaddr_in6 ip6; + struct sockaddr_storage ss; + } src; + union { + struct sockaddr_in ip4; + struct sockaddr_in6 ip6; + struct sockaddr_storage ss; + } dst; + bool src_specified; + } rdma_cm; + }; + + u32 rq_tmo_jiffies; + + int zero_req_lim; + + struct work_struct tl_err_work; + struct work_struct remove_work; + + struct list_head list; + bool qp_in_error; +}; + +struct srp_iu { + struct list_head list; + u64 dma; + void *buf; + size_t size; + enum dma_data_direction direction; + struct ib_cqe cqe; +}; + +/** + * struct srp_fr_desc - fast registration work request arguments + * @entry: Entry in srp_fr_pool.free_list. + * @mr: Memory region. + * @frpl: Fast registration page list. + */ +struct srp_fr_desc { + struct list_head entry; + struct ib_mr *mr; +}; + +/** + * struct srp_fr_pool - pool of fast registration descriptors + * + * An entry is available for allocation if and only if it occurs in @free_list. + * + * @size: Number of descriptors in this pool. + * @max_page_list_len: Maximum fast registration work request page list length. + * @lock: Protects free_list. + * @free_list: List of free descriptors. + * @desc: Fast registration descriptor pool. + */ +struct srp_fr_pool { + int size; + int max_page_list_len; + spinlock_t lock; + struct list_head free_list; + struct srp_fr_desc desc[0]; +}; + +/** + * struct srp_map_state - per-request DMA memory mapping state + * @desc: Pointer to the element of the SRP buffer descriptor array + * that is being filled in. + * @pages: Array with DMA addresses of pages being considered for + * memory registration. + * @base_dma_addr: DMA address of the first page that has not yet been mapped. + * @dma_len: Number of bytes that will be registered with the next + * FMR or FR memory registration call. + * @total_len: Total number of bytes in the sg-list being mapped. + * @npages: Number of page addresses in the pages[] array. 
+ * @nmdesc: Number of FMR or FR memory descriptors used for mapping. + * @ndesc: Number of SRP buffer descriptors that have been filled in. + */ +struct srp_map_state { + union { + struct { + struct ib_pool_fmr **next; + struct ib_pool_fmr **end; + } fmr; + struct { + struct srp_fr_desc **next; + struct srp_fr_desc **end; + } fr; + struct { + void **next; + void **end; + } gen; + }; + struct srp_direct_buf *desc; + union { + u64 *pages; + struct scatterlist *sg; + }; + dma_addr_t base_dma_addr; + u32 dma_len; + u32 total_len; + unsigned int npages; + unsigned int nmdesc; + unsigned int ndesc; +}; + +#endif /* IB_SRP_H */ diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig new file mode 100644 index 000000000..fb8b7182f --- /dev/null +++ b/drivers/infiniband/ulp/srpt/Kconfig @@ -0,0 +1,12 @@ +config INFINIBAND_SRPT + tristate "InfiniBand SCSI RDMA Protocol target support" + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE + ---help--- + + Support for the SCSI RDMA Protocol (SRP) Target driver. The + SRP protocol is a protocol that allows an initiator to access + a block storage device on another host (target) over a network + that supports the RDMA protocol. Currently the RDMA protocol is + supported by InfiniBand and by iWarp network hardware. More + information about the SRP protocol can be found on the website + of the INCITS T10 technical committee (http://www.t10.org/). diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile new file mode 100644 index 000000000..e3ee4bdff --- /dev/null +++ b/drivers/infiniband/ulp/srpt/Makefile @@ -0,0 +1,2 @@ +ccflags-y := -Idrivers/target +obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h new file mode 100644 index 000000000..fb1de1f6f --- /dev/null +++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef IB_DM_MAD_H +#define IB_DM_MAD_H + +#include <linux/types.h> + +#include <rdma/ib_mad.h> + +enum { + /* + * See also section 13.4.7 Status Field, table 115 MAD Common Status + * Field Bit Values and also section 16.3.1.1 Status Field in the + * InfiniBand Architecture Specification. + */ + DM_MAD_STATUS_UNSUP_METHOD = 0x0008, + DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c, + DM_MAD_STATUS_INVALID_FIELD = 0x001c, + DM_MAD_STATUS_NO_IOC = 0x0100, + + /* + * See also the Device Management chapter, section 16.3.3 Attributes, + * table 279 Device Management Attributes in the InfiniBand + * Architecture Specification. + */ + DM_ATTR_CLASS_PORT_INFO = 0x01, + DM_ATTR_IOU_INFO = 0x10, + DM_ATTR_IOC_PROFILE = 0x11, + DM_ATTR_SVC_ENTRIES = 0x12 +}; + +struct ib_dm_hdr { + u8 reserved[28]; +}; + +/* + * Structure of management datagram sent by the SRP target implementation. + * Contains a management datagram header, reliable multi-packet transaction + * protocol (RMPP) header and ib_dm_hdr. Notes: + * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending + * management datagrams. + * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this + * is the header size that is passed to ib_create_send_mad() in ib_srpt.c. + * - The maximum supported size for a management datagram when not using RMPP + * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data. + */ +struct ib_dm_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + struct ib_dm_hdr dm_hdr; + u8 data[IB_MGMT_DEVICE_DATA]; +}; + +/* + * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand + * Architecture Specification. + */ +struct ib_dm_iou_info { + __be16 change_id; + u8 max_controllers; + u8 op_rom; + u8 controller_list[128]; +}; + +/* + * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of + * the InfiniBand Architecture Specification. + */ +struct ib_dm_ioc_profile { + __be64 guid; + __be32 vendor_id; + __be32 device_id; + __be16 device_version; + __be16 reserved1; + __be32 subsys_vendor_id; + __be32 subsys_device_id; + __be16 io_class; + __be16 io_subclass; + __be16 protocol; + __be16 protocol_version; + __be16 service_conn; + __be16 initiators_supported; + __be16 send_queue_depth; + u8 reserved2; + u8 rdma_read_depth; + __be32 send_size; + __be32 rdma_size; + u8 op_cap_mask; + u8 svc_cap_mask; + u8 num_svc_entries; + u8 reserved3[9]; + u8 id_string[64]; +}; + +struct ib_dm_svc_entry { + u8 name[40]; + __be64 id; +}; + +/* + * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture + * Specification. See also section B.7, table B.8 in the T10 SRP r16a document. + */ +struct ib_dm_svc_entries { + struct ib_dm_svc_entry service_entries[4]; +}; + +#endif diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c new file mode 100644 index 000000000..6090f1ce0 --- /dev/null +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -0,0 +1,3816 @@ +/* + * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. + * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/ctype.h> +#include <linux/kthread.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/atomic.h> +#include <linux/inet.h> +#include <rdma/ib_cache.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include "ib_srpt.h" + +/* Name of this kernel module. */ +#define DRV_NAME "ib_srpt" +#define DRV_VERSION "2.0.0" +#define DRV_RELDATE "2011-02-14" + +#define SRPT_ID_STRING "Linux SRP target" + +#undef pr_fmt +#define pr_fmt(fmt) DRV_NAME " " fmt + +MODULE_AUTHOR("Vu Pham and Bart Van Assche"); +MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target " + "v" DRV_VERSION " (" DRV_RELDATE ")"); +MODULE_LICENSE("Dual BSD/GPL"); + +/* + * Global Variables + */ + +static u64 srpt_service_guid; +static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */ +static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */ + +static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE; +module_param(srp_max_req_size, int, 0444); +MODULE_PARM_DESC(srp_max_req_size, + "Maximum size of SRP request messages in bytes."); + +static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE; +module_param(srpt_srq_size, int, 0444); +MODULE_PARM_DESC(srpt_srq_size, + "Shared receive queue (SRQ) size."); + +static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg); +} +module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid, + 0444); +MODULE_PARM_DESC(srpt_service_guid, + "Using this value for ioc_guid, id_ext, and cm_listen_id" + " instead of using the node_guid of the first HCA."); + +static struct ib_client srpt_client; +/* Protects both rdma_cm_port and rdma_cm_id. */ +static DEFINE_MUTEX(rdma_cm_mutex); +/* Port number RDMA/CM will bind to. 
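+ * Zero, the initial value, means that no RDMA/CM port has been
+ * configured yet.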
*/ +static u16 rdma_cm_port; +static struct rdma_cm_id *rdma_cm_id; +static void srpt_release_cmd(struct se_cmd *se_cmd); +static void srpt_free_ch(struct kref *kref); +static int srpt_queue_status(struct se_cmd *cmd); +static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc); +static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc); +static void srpt_process_wait_list(struct srpt_rdma_ch *ch); + +/* + * The only allowed channel state changes are those that change the channel + * state into a state with a higher numerical value. Hence the new > prev test. + */ +static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new) +{ + unsigned long flags; + enum rdma_ch_state prev; + bool changed = false; + + spin_lock_irqsave(&ch->spinlock, flags); + prev = ch->state; + if (new > prev) { + ch->state = new; + changed = true; + } + spin_unlock_irqrestore(&ch->spinlock, flags); + + return changed; +} + +/** + * srpt_event_handler - asynchronous IB event callback function + * @handler: IB event handler registered by ib_register_event_handler(). + * @event: Description of the event that occurred. + * + * Callback function called by the InfiniBand core when an asynchronous IB + * event occurs. This callback may occur in interrupt context. See also + * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand + * Architecture Specification. + */ +static void srpt_event_handler(struct ib_event_handler *handler, + struct ib_event *event) +{ + struct srpt_device *sdev; + struct srpt_port *sport; + u8 port_num; + + sdev = ib_get_client_data(event->device, &srpt_client); + if (!sdev || sdev->device != event->device) + return; + + pr_debug("ASYNC event= %d on device= %s\n", event->event, + sdev->device->name); + + switch (event->event) { + case IB_EVENT_PORT_ERR: + port_num = event->element.port_num - 1; + if (port_num < sdev->device->phys_port_cnt) { + sport = &sdev->port[port_num]; + sport->lid = 0; + sport->sm_lid = 0; + } else { + WARN(true, "event %d: port_num %d out of range 1..%d\n", + event->event, port_num + 1, + sdev->device->phys_port_cnt); + } + break; + case IB_EVENT_PORT_ACTIVE: + case IB_EVENT_LID_CHANGE: + case IB_EVENT_PKEY_CHANGE: + case IB_EVENT_SM_CHANGE: + case IB_EVENT_CLIENT_REREGISTER: + case IB_EVENT_GID_CHANGE: + /* Refresh port data asynchronously. */ + port_num = event->element.port_num - 1; + if (port_num < sdev->device->phys_port_cnt) { + sport = &sdev->port[port_num]; + if (!sport->lid && !sport->sm_lid) + schedule_work(&sport->work); + } else { + WARN(true, "event %d: port_num %d out of range 1..%d\n", + event->event, port_num + 1, + sdev->device->phys_port_cnt); + } + break; + default: + pr_err("received unrecognized IB event %d\n", event->event); + break; + } +} + +/** + * srpt_srq_event - SRQ event callback function + * @event: Description of the event that occurred. + * @ctx: Context pointer specified at SRQ creation time. + */ +static void srpt_srq_event(struct ib_event *event, void *ctx) +{ + pr_debug("SRQ event %d\n", event->event); +} + +static const char *get_ch_state_name(enum rdma_ch_state s) +{ + switch (s) { + case CH_CONNECTING: + return "connecting"; + case CH_LIVE: + return "live"; + case CH_DISCONNECTING: + return "disconnecting"; + case CH_DRAINING: + return "draining"; + case CH_DISCONNECTED: + return "disconnected"; + } + return "???"; +} + +/** + * srpt_qp_event - QP event callback function + * @event: Description of the event that occurred. + * @ch: SRPT RDMA channel. 
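+ *
+ * For a QP associated with an SRQ, IB_EVENT_QP_LAST_WQE_REACHED is
+ * generated after the QP has reached the error state and means that no
+ * further receive completions will occur for that QP.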
+ */ +static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) +{ + pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n", + event->event, ch, ch->sess_name, ch->state); + + switch (event->event) { + case IB_EVENT_COMM_EST: + if (ch->using_rdma_cm) + rdma_notify(ch->rdma_cm.cm_id, event->event); + else + ib_cm_notify(ch->ib_cm.cm_id, event->event); + break; + case IB_EVENT_QP_LAST_WQE_REACHED: + pr_debug("%s-%d, state %s: received Last WQE event.\n", + ch->sess_name, ch->qp->qp_num, + get_ch_state_name(ch->state)); + break; + default: + pr_err("received unrecognized IB QP event %d\n", event->event); + break; + } +} + +/** + * srpt_set_ioc - initialize a IOUnitInfo structure + * @c_list: controller list. + * @slot: one-based slot number. + * @value: four-bit value. + * + * Copies the lowest four bits of value in element slot of the array of four + * bit elements called c_list (controller list). The index slot is one-based. + */ +static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value) +{ + u16 id; + u8 tmp; + + id = (slot - 1) / 2; + if (slot & 0x1) { + tmp = c_list[id] & 0xf; + c_list[id] = (value << 4) | tmp; + } else { + tmp = c_list[id] & 0xf0; + c_list[id] = (value & 0xf) | tmp; + } +} + +/** + * srpt_get_class_port_info - copy ClassPortInfo to a management datagram + * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO. + * + * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture + * Specification. + */ +static void srpt_get_class_port_info(struct ib_dm_mad *mad) +{ + struct ib_class_port_info *cif; + + cif = (struct ib_class_port_info *)mad->data; + memset(cif, 0, sizeof(*cif)); + cif->base_version = 1; + cif->class_version = 1; + + ib_set_cpi_resp_time(cif, 20); + mad->mad_hdr.status = 0; +} + +/** + * srpt_get_iou - write IOUnitInfo to a management datagram + * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO. + * + * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture + * Specification. See also section B.7, table B.6 in the SRP r16a document. + */ +static void srpt_get_iou(struct ib_dm_mad *mad) +{ + struct ib_dm_iou_info *ioui; + u8 slot; + int i; + + ioui = (struct ib_dm_iou_info *)mad->data; + ioui->change_id = cpu_to_be16(1); + ioui->max_controllers = 16; + + /* set present for slot 1 and empty for the rest */ + srpt_set_ioc(ioui->controller_list, 1, 1); + for (i = 1, slot = 2; i < 16; i++, slot++) + srpt_set_ioc(ioui->controller_list, slot, 0); + + mad->mad_hdr.status = 0; +} + +/** + * srpt_get_ioc - write IOControllerprofile to a management datagram + * @sport: HCA port through which the MAD has been received. + * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query. + * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE. + * + * See also section 16.3.3.4 IOControllerProfile in the InfiniBand + * Architecture Specification. See also section B.7, table B.7 in the SRP + * r16a document. 
+ */ +static void srpt_get_ioc(struct srpt_port *sport, u32 slot, + struct ib_dm_mad *mad) +{ + struct srpt_device *sdev = sport->sdev; + struct ib_dm_ioc_profile *iocp; + int send_queue_depth; + + iocp = (struct ib_dm_ioc_profile *)mad->data; + + if (!slot || slot > 16) { + mad->mad_hdr.status + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + return; + } + + if (slot > 2) { + mad->mad_hdr.status + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); + return; + } + + if (sdev->use_srq) + send_queue_depth = sdev->srq_size; + else + send_queue_depth = min(MAX_SRPT_RQ_SIZE, + sdev->device->attrs.max_qp_wr); + + memset(iocp, 0, sizeof(*iocp)); + strcpy(iocp->id_string, SRPT_ID_STRING); + iocp->guid = cpu_to_be64(srpt_service_guid); + iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id); + iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id); + iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver); + iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id); + iocp->subsys_device_id = 0x0; + iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS); + iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS); + iocp->protocol = cpu_to_be16(SRP_PROTOCOL); + iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION); + iocp->send_queue_depth = cpu_to_be16(send_queue_depth); + iocp->rdma_read_depth = 4; + iocp->send_size = cpu_to_be32(srp_max_req_size); + iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size, + 1U << 24)); + iocp->num_svc_entries = 1; + iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC | + SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC; + + mad->mad_hdr.status = 0; +} + +/** + * srpt_get_svc_entries - write ServiceEntries to a management datagram + * @ioc_guid: I/O controller GUID to use in reply. + * @slot: I/O controller number. + * @hi: End of the range of service entries to be specified in the reply. + * @lo: Start of the range of service entries to be specified in the reply.. + * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES. + * + * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture + * Specification. See also section B.7, table B.8 in the SRP r16a document. + */ +static void srpt_get_svc_entries(u64 ioc_guid, + u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad) +{ + struct ib_dm_svc_entries *svc_entries; + + WARN_ON(!ioc_guid); + + if (!slot || slot > 16) { + mad->mad_hdr.status + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + return; + } + + if (slot > 2 || lo > hi || hi > 1) { + mad->mad_hdr.status + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); + return; + } + + svc_entries = (struct ib_dm_svc_entries *)mad->data; + memset(svc_entries, 0, sizeof(*svc_entries)); + svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid); + snprintf(svc_entries->service_entries[0].name, + sizeof(svc_entries->service_entries[0].name), + "%s%016llx", + SRP_SERVICE_NAME_PREFIX, + ioc_guid); + + mad->mad_hdr.status = 0; +} + +/** + * srpt_mgmt_method_get - process a received management datagram + * @sp: HCA port through which the MAD has been received. + * @rq_mad: received MAD. + * @rsp_mad: response MAD. 
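+ *
+ * For DM_ATTR_SVC_ENTRIES the 32-bit attribute modifier is decoded
+ * below as: bits 31..16 = slot number, bits 15..8 = hi, bits 7..0 = lo.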
+ */ +static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad, + struct ib_dm_mad *rsp_mad) +{ + u16 attr_id; + u32 slot; + u8 hi, lo; + + attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id); + switch (attr_id) { + case DM_ATTR_CLASS_PORT_INFO: + srpt_get_class_port_info(rsp_mad); + break; + case DM_ATTR_IOU_INFO: + srpt_get_iou(rsp_mad); + break; + case DM_ATTR_IOC_PROFILE: + slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod); + srpt_get_ioc(sp, slot, rsp_mad); + break; + case DM_ATTR_SVC_ENTRIES: + slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod); + hi = (u8) ((slot >> 8) & 0xff); + lo = (u8) (slot & 0xff); + slot = (u16) ((slot >> 16) & 0xffff); + srpt_get_svc_entries(srpt_service_guid, + slot, hi, lo, rsp_mad); + break; + default: + rsp_mad->mad_hdr.status = + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + break; + } +} + +/** + * srpt_mad_send_handler - MAD send completion callback + * @mad_agent: Return value of ib_register_mad_agent(). + * @mad_wc: Work completion reporting that the MAD has been sent. + */ +static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent, + struct ib_mad_send_wc *mad_wc) +{ + rdma_destroy_ah(mad_wc->send_buf->ah); + ib_free_send_mad(mad_wc->send_buf); +} + +/** + * srpt_mad_recv_handler - MAD reception callback function + * @mad_agent: Return value of ib_register_mad_agent(). + * @send_buf: Not used. + * @mad_wc: Work completion reporting that a MAD has been received. + */ +static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, + struct ib_mad_send_buf *send_buf, + struct ib_mad_recv_wc *mad_wc) +{ + struct srpt_port *sport = (struct srpt_port *)mad_agent->context; + struct ib_ah *ah; + struct ib_mad_send_buf *rsp; + struct ib_dm_mad *dm_mad; + + if (!mad_wc || !mad_wc->recv_buf.mad) + return; + + ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, + mad_wc->recv_buf.grh, mad_agent->port_num); + if (IS_ERR(ah)) + goto err; + + BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR); + + rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, + mad_wc->wc->pkey_index, 0, + IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA, + GFP_KERNEL, + IB_MGMT_BASE_VERSION); + if (IS_ERR(rsp)) + goto err_rsp; + + rsp->ah = ah; + + dm_mad = rsp->mad; + memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad)); + dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP; + dm_mad->mad_hdr.status = 0; + + switch (mad_wc->recv_buf.mad->mad_hdr.method) { + case IB_MGMT_METHOD_GET: + srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad); + break; + case IB_MGMT_METHOD_SET: + dm_mad->mad_hdr.status = + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + break; + default: + dm_mad->mad_hdr.status = + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); + break; + } + + if (!ib_post_send_mad(rsp, NULL)) { + ib_free_recv_mad(mad_wc); + /* will destroy_ah & free_send_mad in send completion */ + return; + } + + ib_free_send_mad(rsp); + +err_rsp: + rdma_destroy_ah(ah); +err: + ib_free_recv_mad(mad_wc); +} + +static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid) +{ + const __be16 *g = (const __be16 *)guid; + + return snprintf(buf, size, "%04x:%04x:%04x:%04x", + be16_to_cpu(g[0]), be16_to_cpu(g[1]), + be16_to_cpu(g[2]), be16_to_cpu(g[3])); +} + +/** + * srpt_refresh_port - configure a HCA port + * @sport: SRPT HCA port. + * + * Enable InfiniBand management datagram processing, update the cached sm_lid, + * lid and gid values, and register a callback function for processing MADs + * on the specified port. 
+ * + * Note: It is safe to call this function more than once for the same port. + */ +static int srpt_refresh_port(struct srpt_port *sport) +{ + struct ib_mad_reg_req reg_req; + struct ib_port_modify port_modify; + struct ib_port_attr port_attr; + int ret; + + memset(&port_modify, 0, sizeof(port_modify)); + port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP; + port_modify.clr_port_cap_mask = 0; + + ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify); + if (ret) + goto err_mod_port; + + ret = ib_query_port(sport->sdev->device, sport->port, &port_attr); + if (ret) + goto err_query_port; + + sport->sm_lid = port_attr.sm_lid; + sport->lid = port_attr.lid; + + ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid); + if (ret) + goto err_query_port; + + sport->port_guid_wwn.priv = sport; + srpt_format_guid(sport->port_guid, sizeof(sport->port_guid), + &sport->gid.global.interface_id); + sport->port_gid_wwn.priv = sport; + snprintf(sport->port_gid, sizeof(sport->port_gid), + "0x%016llx%016llx", + be64_to_cpu(sport->gid.global.subnet_prefix), + be64_to_cpu(sport->gid.global.interface_id)); + + if (!sport->mad_agent) { + memset(®_req, 0, sizeof(reg_req)); + reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; + reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION; + set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask); + set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask); + + sport->mad_agent = ib_register_mad_agent(sport->sdev->device, + sport->port, + IB_QPT_GSI, + ®_req, 0, + srpt_mad_send_handler, + srpt_mad_recv_handler, + sport, 0); + if (IS_ERR(sport->mad_agent)) { + ret = PTR_ERR(sport->mad_agent); + sport->mad_agent = NULL; + goto err_query_port; + } + } + + return 0; + +err_query_port: + + port_modify.set_port_cap_mask = 0; + port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP; + ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify); + +err_mod_port: + + return ret; +} + +/** + * srpt_unregister_mad_agent - unregister MAD callback functions + * @sdev: SRPT HCA pointer. + * + * Note: It is safe to call this function more than once for the same device. + */ +static void srpt_unregister_mad_agent(struct srpt_device *sdev) +{ + struct ib_port_modify port_modify = { + .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP, + }; + struct srpt_port *sport; + int i; + + for (i = 1; i <= sdev->device->phys_port_cnt; i++) { + sport = &sdev->port[i - 1]; + WARN_ON(sport->port != i); + if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0) + pr_err("disabling MAD processing failed.\n"); + if (sport->mad_agent) { + ib_unregister_mad_agent(sport->mad_agent); + sport->mad_agent = NULL; + } + } +} + +/** + * srpt_alloc_ioctx - allocate a SRPT I/O context structure + * @sdev: SRPT HCA pointer. + * @ioctx_size: I/O context size. + * @dma_size: Size of I/O context DMA buffer. + * @dir: DMA data direction. 
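+ *
+ * Allocate an I/O context structure together with a DMA-mapped buffer of
+ * @dma_size bytes. Returns NULL if allocating or mapping the buffer fails.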
+ */ +static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev, + int ioctx_size, int dma_size, + enum dma_data_direction dir) +{ + struct srpt_ioctx *ioctx; + + ioctx = kmalloc(ioctx_size, GFP_KERNEL); + if (!ioctx) + goto err; + + ioctx->buf = kmalloc(dma_size, GFP_KERNEL); + if (!ioctx->buf) + goto err_free_ioctx; + + ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir); + if (ib_dma_mapping_error(sdev->device, ioctx->dma)) + goto err_free_buf; + + return ioctx; + +err_free_buf: + kfree(ioctx->buf); +err_free_ioctx: + kfree(ioctx); +err: + return NULL; +} + +/** + * srpt_free_ioctx - free a SRPT I/O context structure + * @sdev: SRPT HCA pointer. + * @ioctx: I/O context pointer. + * @dma_size: Size of I/O context DMA buffer. + * @dir: DMA data direction. + */ +static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx, + int dma_size, enum dma_data_direction dir) +{ + if (!ioctx) + return; + + ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir); + kfree(ioctx->buf); + kfree(ioctx); +} + +/** + * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures + * @sdev: Device to allocate the I/O context ring for. + * @ring_size: Number of elements in the I/O context ring. + * @ioctx_size: I/O context size. + * @dma_size: DMA buffer size. + * @dir: DMA data direction. + */ +static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev, + int ring_size, int ioctx_size, + int dma_size, enum dma_data_direction dir) +{ + struct srpt_ioctx **ring; + int i; + + WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) + && ioctx_size != sizeof(struct srpt_send_ioctx)); + + ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL); + if (!ring) + goto out; + for (i = 0; i < ring_size; ++i) { + ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir); + if (!ring[i]) + goto err; + ring[i]->index = i; + } + goto out; + +err: + while (--i >= 0) + srpt_free_ioctx(sdev, ring[i], dma_size, dir); + kvfree(ring); + ring = NULL; +out: + return ring; +} + +/** + * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures + * @ioctx_ring: I/O context ring to be freed. + * @sdev: SRPT HCA pointer. + * @ring_size: Number of ring elements. + * @dma_size: Size of I/O context DMA buffer. + * @dir: DMA data direction. + */ +static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring, + struct srpt_device *sdev, int ring_size, + int dma_size, enum dma_data_direction dir) +{ + int i; + + if (!ioctx_ring) + return; + + for (i = 0; i < ring_size; ++i) + srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir); + kvfree(ioctx_ring); +} + +/** + * srpt_set_cmd_state - set the state of a SCSI command + * @ioctx: Send I/O context. + * @new: New I/O context state. + * + * Does not modify the state of aborted commands. Returns the previous command + * state. + */ +static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx, + enum srpt_command_state new) +{ + enum srpt_command_state previous; + + previous = ioctx->state; + if (previous != SRPT_STATE_DONE) + ioctx->state = new; + + return previous; +} + +/** + * srpt_test_and_set_cmd_state - test and set the state of a command + * @ioctx: Send I/O context. + * @old: Current I/O context state. + * @new: New I/O context state. + * + * Returns true if and only if the previous command state was equal to 'old'. 
+ */ +static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx, + enum srpt_command_state old, + enum srpt_command_state new) +{ + enum srpt_command_state previous; + + WARN_ON(!ioctx); + WARN_ON(old == SRPT_STATE_DONE); + WARN_ON(new == SRPT_STATE_NEW); + + previous = ioctx->state; + if (previous == old) + ioctx->state = new; + + return previous == old; +} + +/** + * srpt_post_recv - post an IB receive request + * @sdev: SRPT HCA pointer. + * @ch: SRPT RDMA channel. + * @ioctx: Receive I/O context pointer. + */ +static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch, + struct srpt_recv_ioctx *ioctx) +{ + struct ib_sge list; + struct ib_recv_wr wr; + + BUG_ON(!sdev); + list.addr = ioctx->ioctx.dma; + list.length = srp_max_req_size; + list.lkey = sdev->lkey; + + ioctx->ioctx.cqe.done = srpt_recv_done; + wr.wr_cqe = &ioctx->ioctx.cqe; + wr.next = NULL; + wr.sg_list = &list; + wr.num_sge = 1; + + if (sdev->use_srq) + return ib_post_srq_recv(sdev->srq, &wr, NULL); + else + return ib_post_recv(ch->qp, &wr, NULL); +} + +/** + * srpt_zerolength_write - perform a zero-length RDMA write + * @ch: SRPT RDMA channel. + * + * A quote from the InfiniBand specification: C9-88: For an HCA responder + * using Reliable Connection service, for each zero-length RDMA READ or WRITE + * request, the R_Key shall not be validated, even if the request includes + * Immediate data. + */ +static int srpt_zerolength_write(struct srpt_rdma_ch *ch) +{ + struct ib_rdma_wr wr = { + .wr = { + .next = NULL, + { .wr_cqe = &ch->zw_cqe, }, + .opcode = IB_WR_RDMA_WRITE, + .send_flags = IB_SEND_SIGNALED, + } + }; + + pr_debug("%s-%d: queued zerolength write\n", ch->sess_name, + ch->qp->qp_num); + + return ib_post_send(ch->qp, &wr.wr, NULL); +} + +static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct srpt_rdma_ch *ch = cq->cq_context; + + pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num, + wc->status); + + if (wc->status == IB_WC_SUCCESS) { + srpt_process_wait_list(ch); + } else { + if (srpt_set_ch_state(ch, CH_DISCONNECTED)) + schedule_work(&ch->release_work); + else + pr_debug("%s-%d: already disconnected.\n", + ch->sess_name, ch->qp->qp_num); + } +} + +static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx, + struct srp_direct_buf *db, int nbufs, struct scatterlist **sg, + unsigned *sg_cnt) +{ + enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd); + struct srpt_rdma_ch *ch = ioctx->ch; + struct scatterlist *prev = NULL; + unsigned prev_nents; + int ret, i; + + if (nbufs == 1) { + ioctx->rw_ctxs = &ioctx->s_rw_ctx; + } else { + ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs), + GFP_KERNEL); + if (!ioctx->rw_ctxs) + return -ENOMEM; + } + + for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) { + struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i]; + u64 remote_addr = be64_to_cpu(db->va); + u32 size = be32_to_cpu(db->len); + u32 rkey = be32_to_cpu(db->key); + + ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false, + i < nbufs - 1); + if (ret) + goto unwind; + + ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port, + ctx->sg, ctx->nents, 0, remote_addr, rkey, dir); + if (ret < 0) { + target_free_sgl(ctx->sg, ctx->nents); + goto unwind; + } + + ioctx->n_rdma += ret; + ioctx->n_rw_ctx++; + + if (prev) { + sg_unmark_end(&prev[prev_nents - 1]); + sg_chain(prev, prev_nents + 1, ctx->sg); + } else { + *sg = ctx->sg; + } + + prev = ctx->sg; + prev_nents = ctx->nents; + + *sg_cnt += ctx->nents; + } + + return 0; + +unwind: 
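+	/*
+	 * Undo the rdma_rw context and scatterlist allocations that have
+	 * already succeeded and free the context array if it was allocated
+	 * dynamically.
+	 */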
+ while (--i >= 0) { + struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i]; + + rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port, + ctx->sg, ctx->nents, dir); + target_free_sgl(ctx->sg, ctx->nents); + } + if (ioctx->rw_ctxs != &ioctx->s_rw_ctx) + kfree(ioctx->rw_ctxs); + return ret; +} + +static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx) +{ + enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd); + int i; + + for (i = 0; i < ioctx->n_rw_ctx; i++) { + struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i]; + + rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port, + ctx->sg, ctx->nents, dir); + target_free_sgl(ctx->sg, ctx->nents); + } + + if (ioctx->rw_ctxs != &ioctx->s_rw_ctx) + kfree(ioctx->rw_ctxs); +} + +static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd) +{ + /* + * The pointer computations below will only be compiled correctly + * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check + * whether srp_cmd::add_data has been declared as a byte pointer. + */ + BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) && + !__same_type(srp_cmd->add_data[0], (u8)0)); + + /* + * According to the SRP spec, the lower two bits of the 'ADDITIONAL + * CDB LENGTH' field are reserved and the size in bytes of this field + * is four times the value specified in bits 3..7. Hence the "& ~3". + */ + return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3); +} + +/** + * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request + * @ioctx: Pointer to the I/O context associated with the request. + * @srp_cmd: Pointer to the SRP_CMD request data. + * @dir: Pointer to the variable to which the transfer direction will be + * written. + * @sg: [out] scatterlist allocated for the parsed SRP_CMD. + * @sg_cnt: [out] length of @sg. + * @data_len: Pointer to the variable to which the total data length of all + * descriptors in the SRP_CMD request will be written. + * + * This function initializes ioctx->nrbuf and ioctx->r_bufs. + * + * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors; + * -ENOMEM when memory allocation fails and zero upon success. + */ +static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx, + struct srp_cmd *srp_cmd, enum dma_data_direction *dir, + struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len) +{ + BUG_ON(!dir); + BUG_ON(!data_len); + + /* + * The lower four bits of the buffer format field contain the DATA-IN + * buffer descriptor format, and the highest four bits contain the + * DATA-OUT buffer descriptor format. + */ + if (srp_cmd->buf_fmt & 0xf) + /* DATA-IN: transfer data from target to initiator (read). */ + *dir = DMA_FROM_DEVICE; + else if (srp_cmd->buf_fmt >> 4) + /* DATA-OUT: transfer data from initiator to target (write). 
*/ + *dir = DMA_TO_DEVICE; + else + *dir = DMA_NONE; + + /* initialize data_direction early as srpt_alloc_rw_ctxs needs it */ + ioctx->cmd.data_direction = *dir; + + if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) { + struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd); + + *data_len = be32_to_cpu(db->len); + return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt); + } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) { + struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd); + int nbufs = be32_to_cpu(idb->table_desc.len) / + sizeof(struct srp_direct_buf); + + if (nbufs > + (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) { + pr_err("received unsupported SRP_CMD request" + " type (%u out + %u in != %u / %zu)\n", + srp_cmd->data_out_desc_cnt, + srp_cmd->data_in_desc_cnt, + be32_to_cpu(idb->table_desc.len), + sizeof(struct srp_direct_buf)); + return -EINVAL; + } + + *data_len = be32_to_cpu(idb->len); + return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs, + sg, sg_cnt); + } else { + *data_len = 0; + return 0; + } +} + +/** + * srpt_init_ch_qp - initialize queue pair attributes + * @ch: SRPT RDMA channel. + * @qp: Queue pair pointer. + * + * Initialized the attributes of queue pair 'qp' by allowing local write, + * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT. + */ +static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) +{ + struct ib_qp_attr *attr; + int ret; + + WARN_ON_ONCE(ch->using_rdma_cm); + + attr = kzalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + return -ENOMEM; + + attr->qp_state = IB_QPS_INIT; + attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; + attr->port_num = ch->sport->port; + + ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port, + ch->pkey, &attr->pkey_index); + if (ret < 0) + pr_err("Translating pkey %#x failed (%d) - using index 0\n", + ch->pkey, ret); + + ret = ib_modify_qp(qp, attr, + IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT | + IB_QP_PKEY_INDEX); + + kfree(attr); + return ret; +} + +/** + * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR) + * @ch: channel of the queue pair. + * @qp: queue pair to change the state of. + * + * Returns zero upon success and a negative value upon failure. + * + * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system. + * If this structure ever becomes larger, it might be necessary to allocate + * it dynamically instead of on the stack. + */ +static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) +{ + struct ib_qp_attr qp_attr; + int attr_mask; + int ret; + + WARN_ON_ONCE(ch->using_rdma_cm); + + qp_attr.qp_state = IB_QPS_RTR; + ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask); + if (ret) + goto out; + + qp_attr.max_dest_rd_atomic = 4; + + ret = ib_modify_qp(qp, &qp_attr, attr_mask); + +out: + return ret; +} + +/** + * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS) + * @ch: channel of the queue pair. + * @qp: queue pair to change the state of. + * + * Returns zero upon success and a negative value upon failure. + * + * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system. + * If this structure ever becomes larger, it might be necessary to allocate + * it dynamically instead of on the stack. 
+ */ +static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) +{ + struct ib_qp_attr qp_attr; + int attr_mask; + int ret; + + qp_attr.qp_state = IB_QPS_RTS; + ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask); + if (ret) + goto out; + + qp_attr.max_rd_atomic = 4; + + ret = ib_modify_qp(qp, &qp_attr, attr_mask); + +out: + return ret; +} + +/** + * srpt_ch_qp_err - set the channel queue pair state to 'error' + * @ch: SRPT RDMA channel. + */ +static int srpt_ch_qp_err(struct srpt_rdma_ch *ch) +{ + struct ib_qp_attr qp_attr; + + qp_attr.qp_state = IB_QPS_ERR; + return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); +} + +/** + * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator + * @ch: SRPT RDMA channel. + */ +static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) +{ + struct srpt_send_ioctx *ioctx; + unsigned long flags; + + BUG_ON(!ch); + + ioctx = NULL; + spin_lock_irqsave(&ch->spinlock, flags); + if (!list_empty(&ch->free_list)) { + ioctx = list_first_entry(&ch->free_list, + struct srpt_send_ioctx, free_list); + list_del(&ioctx->free_list); + } + spin_unlock_irqrestore(&ch->spinlock, flags); + + if (!ioctx) + return ioctx; + + BUG_ON(ioctx->ch != ch); + ioctx->state = SRPT_STATE_NEW; + ioctx->n_rdma = 0; + ioctx->n_rw_ctx = 0; + ioctx->queue_status_only = false; + /* + * transport_init_se_cmd() does not initialize all fields, so do it + * here. + */ + memset(&ioctx->cmd, 0, sizeof(ioctx->cmd)); + memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data)); + + return ioctx; +} + +/** + * srpt_abort_cmd - abort a SCSI command + * @ioctx: I/O context associated with the SCSI command. + */ +static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) +{ + enum srpt_command_state state; + + BUG_ON(!ioctx); + + /* + * If the command is in a state where the target core is waiting for + * the ib_srpt driver, change the state to the next state. + */ + + state = ioctx->state; + switch (state) { + case SRPT_STATE_NEED_DATA: + ioctx->state = SRPT_STATE_DATA_IN; + break; + case SRPT_STATE_CMD_RSP_SENT: + case SRPT_STATE_MGMT_RSP_SENT: + ioctx->state = SRPT_STATE_DONE; + break; + default: + WARN_ONCE(true, "%s: unexpected I/O context state %d\n", + __func__, state); + break; + } + + pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state, + ioctx->state, ioctx->cmd.tag); + + switch (state) { + case SRPT_STATE_NEW: + case SRPT_STATE_DATA_IN: + case SRPT_STATE_MGMT: + case SRPT_STATE_DONE: + /* + * Do nothing - defer abort processing until + * srpt_queue_response() is invoked. + */ + break; + case SRPT_STATE_NEED_DATA: + pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag); + transport_generic_request_failure(&ioctx->cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + break; + case SRPT_STATE_CMD_RSP_SENT: + /* + * SRP_RSP sending failed or the SRP_RSP send completion has + * not been received in time. + */ + transport_generic_free_cmd(&ioctx->cmd, 0); + break; + case SRPT_STATE_MGMT_RSP_SENT: + transport_generic_free_cmd(&ioctx->cmd, 0); + break; + default: + WARN(1, "Unexpected command state (%d)", state); + break; + } + + return state; +} + +/** + * srpt_rdma_read_done - RDMA read completion callback + * @cq: Completion queue. + * @wc: Work completion. + * + * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping + * the data that has been transferred via IB RDMA had to be postponed until the + * check_stop_free() callback. None of this is necessary anymore and needs to + * be cleaned up. 
+ */ +static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct srpt_rdma_ch *ch = cq->cq_context; + struct srpt_send_ioctx *ioctx = + container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe); + + WARN_ON(ioctx->n_rdma <= 0); + atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); + ioctx->n_rdma = 0; + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n", + ioctx, wc->status); + srpt_abort_cmd(ioctx); + return; + } + + if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA, + SRPT_STATE_DATA_IN)) + target_execute_cmd(&ioctx->cmd); + else + pr_err("%s[%d]: wrong state = %d\n", __func__, + __LINE__, ioctx->state); +} + +/** + * srpt_build_cmd_rsp - build a SRP_RSP response + * @ch: RDMA channel through which the request has been received. + * @ioctx: I/O context associated with the SRP_CMD request. The response will + * be built in the buffer ioctx->buf points at and hence this function will + * overwrite the request data. + * @tag: tag of the request for which this response is being generated. + * @status: value for the STATUS field of the SRP_RSP information unit. + * + * Returns the size in bytes of the SRP_RSP response. + * + * An SRP_RSP response contains a SCSI status or service response. See also + * section 6.9 in the SRP r16a document for the format of an SRP_RSP + * response. See also SPC-2 for more information about sense data. + */ +static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, u64 tag, + int status) +{ + struct se_cmd *cmd = &ioctx->cmd; + struct srp_rsp *srp_rsp; + const u8 *sense_data; + int sense_data_len, max_sense_len; + u32 resid = cmd->residual_count; + + /* + * The lowest bit of all SAM-3 status codes is zero (see also + * paragraph 5.3 in SAM-3). 
+ */ + WARN_ON(status & 1); + + srp_rsp = ioctx->ioctx.buf; + BUG_ON(!srp_rsp); + + sense_data = ioctx->sense_data; + sense_data_len = ioctx->cmd.scsi_sense_length; + WARN_ON(sense_data_len > sizeof(ioctx->sense_data)); + + memset(srp_rsp, 0, sizeof(*srp_rsp)); + srp_rsp->opcode = SRP_RSP; + srp_rsp->req_lim_delta = + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->tag = tag; + srp_rsp->status = status; + + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + if (cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an underflow write */ + srp_rsp->flags = SRP_RSP_FLAG_DOUNDER; + srp_rsp->data_out_res_cnt = cpu_to_be32(resid); + } else if (cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an underflow read */ + srp_rsp->flags = SRP_RSP_FLAG_DIUNDER; + srp_rsp->data_in_res_cnt = cpu_to_be32(resid); + } + } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + if (cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an overflow write */ + srp_rsp->flags = SRP_RSP_FLAG_DOOVER; + srp_rsp->data_out_res_cnt = cpu_to_be32(resid); + } else if (cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an overflow read */ + srp_rsp->flags = SRP_RSP_FLAG_DIOVER; + srp_rsp->data_in_res_cnt = cpu_to_be32(resid); + } + } + + if (sense_data_len) { + BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp)); + max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); + if (sense_data_len > max_sense_len) { + pr_warn("truncated sense data from %d to %d" + " bytes\n", sense_data_len, max_sense_len); + sense_data_len = max_sense_len; + } + + srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID; + srp_rsp->sense_data_len = cpu_to_be32(sense_data_len); + memcpy(srp_rsp + 1, sense_data, sense_data_len); + } + + return sizeof(*srp_rsp) + sense_data_len; +} + +/** + * srpt_build_tskmgmt_rsp - build a task management response + * @ch: RDMA channel through which the request has been received. + * @ioctx: I/O context in which the SRP_RSP response will be built. + * @rsp_code: RSP_CODE that will be stored in the response. + * @tag: Tag of the request for which this response is being generated. + * + * Returns the size in bytes of the SRP_RSP response. + * + * An SRP_RSP response contains a SCSI status or service response. See also + * section 6.9 in the SRP r16a document for the format of an SRP_RSP + * response. + */ +static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, + u8 rsp_code, u64 tag) +{ + struct srp_rsp *srp_rsp; + int resp_data_len; + int resp_len; + + resp_data_len = 4; + resp_len = sizeof(*srp_rsp) + resp_data_len; + + srp_rsp = ioctx->ioctx.buf; + BUG_ON(!srp_rsp); + memset(srp_rsp, 0, sizeof(*srp_rsp)); + + srp_rsp->opcode = SRP_RSP; + srp_rsp->req_lim_delta = + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->tag = tag; + + srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; + srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); + srp_rsp->data[3] = rsp_code; + + return resp_len; +} + +static int srpt_check_stop_free(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx = container_of(cmd, + struct srpt_send_ioctx, cmd); + + return target_put_sess_cmd(&ioctx->cmd); +} + +/** + * srpt_handle_cmd - process a SRP_CMD information unit + * @ch: SRPT RDMA channel. + * @recv_ioctx: Receive I/O context. + * @send_ioctx: Send I/O context. 
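+ *
+ * Parse the data descriptors of the SRP_CMD request and submit the command
+ * to the target core. The send I/O context is released if either step fails.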
+ */ +static void srpt_handle_cmd(struct srpt_rdma_ch *ch, + struct srpt_recv_ioctx *recv_ioctx, + struct srpt_send_ioctx *send_ioctx) +{ + struct se_cmd *cmd; + struct srp_cmd *srp_cmd; + struct scatterlist *sg = NULL; + unsigned sg_cnt = 0; + u64 data_len; + enum dma_data_direction dir; + int rc; + + BUG_ON(!send_ioctx); + + srp_cmd = recv_ioctx->ioctx.buf; + cmd = &send_ioctx->cmd; + cmd->tag = srp_cmd->tag; + + switch (srp_cmd->task_attr) { + case SRP_CMD_SIMPLE_Q: + cmd->sam_task_attr = TCM_SIMPLE_TAG; + break; + case SRP_CMD_ORDERED_Q: + default: + cmd->sam_task_attr = TCM_ORDERED_TAG; + break; + case SRP_CMD_HEAD_OF_Q: + cmd->sam_task_attr = TCM_HEAD_TAG; + break; + case SRP_CMD_ACA: + cmd->sam_task_attr = TCM_ACA_TAG; + break; + } + + rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt, + &data_len); + if (rc) { + if (rc != -EAGAIN) { + pr_err("0x%llx: parsing SRP descriptor table failed.\n", + srp_cmd->tag); + } + goto release_ioctx; + } + + rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb, + &send_ioctx->sense_data[0], + scsilun_to_int(&srp_cmd->lun), data_len, + TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF, + sg, sg_cnt, NULL, 0, NULL, 0); + if (rc != 0) { + pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc, + srp_cmd->tag); + goto release_ioctx; + } + return; + +release_ioctx: + send_ioctx->state = SRPT_STATE_DONE; + srpt_release_cmd(cmd); +} + +static int srp_tmr_to_tcm(int fn) +{ + switch (fn) { + case SRP_TSK_ABORT_TASK: + return TMR_ABORT_TASK; + case SRP_TSK_ABORT_TASK_SET: + return TMR_ABORT_TASK_SET; + case SRP_TSK_CLEAR_TASK_SET: + return TMR_CLEAR_TASK_SET; + case SRP_TSK_LUN_RESET: + return TMR_LUN_RESET; + case SRP_TSK_CLEAR_ACA: + return TMR_CLEAR_ACA; + default: + return -1; + } +} + +/** + * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit + * @ch: SRPT RDMA channel. + * @recv_ioctx: Receive I/O context. + * @send_ioctx: Send I/O context. + * + * Returns 0 if and only if the request will be processed by the target core. + * + * For more information about SRP_TSK_MGMT information units, see also section + * 6.7 in the SRP r16a document. + */ +static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch, + struct srpt_recv_ioctx *recv_ioctx, + struct srpt_send_ioctx *send_ioctx) +{ + struct srp_tsk_mgmt *srp_tsk; + struct se_cmd *cmd; + struct se_session *sess = ch->sess; + int tcm_tmr; + int rc; + + BUG_ON(!send_ioctx); + + srp_tsk = recv_ioctx->ioctx.buf; + cmd = &send_ioctx->cmd; + + pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n", + srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch, + ch->sess); + + srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT); + send_ioctx->cmd.tag = srp_tsk->tag; + tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func); + rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, + scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr, + GFP_KERNEL, srp_tsk->task_tag, + TARGET_SCF_ACK_KREF); + if (rc != 0) { + send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED; + goto fail; + } + return; +fail: + transport_send_check_condition_and_sense(cmd, 0, 0); // XXX: +} + +/** + * srpt_handle_new_iu - process a newly received information unit + * @ch: RDMA channel through which the information unit has been received. + * @recv_ioctx: Receive I/O context associated with the information unit. 
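+ *
+ * Returns true if the information unit has been processed and the receive
+ * buffer has been reposted, and false if processing has been deferred by
+ * queueing the receive I/O context on the command wait list.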
+ */ +static bool +srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx) +{ + struct srpt_send_ioctx *send_ioctx = NULL; + struct srp_cmd *srp_cmd; + bool res = false; + u8 opcode; + + BUG_ON(!ch); + BUG_ON(!recv_ioctx); + + if (unlikely(ch->state == CH_CONNECTING)) + goto push; + + ib_dma_sync_single_for_cpu(ch->sport->sdev->device, + recv_ioctx->ioctx.dma, srp_max_req_size, + DMA_FROM_DEVICE); + + srp_cmd = recv_ioctx->ioctx.buf; + opcode = srp_cmd->opcode; + if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) { + send_ioctx = srpt_get_send_ioctx(ch); + if (unlikely(!send_ioctx)) + goto push; + } + + if (!list_empty(&recv_ioctx->wait_list)) { + WARN_ON_ONCE(!ch->processing_wait_list); + list_del_init(&recv_ioctx->wait_list); + } + + switch (opcode) { + case SRP_CMD: + srpt_handle_cmd(ch, recv_ioctx, send_ioctx); + break; + case SRP_TSK_MGMT: + srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx); + break; + case SRP_I_LOGOUT: + pr_err("Not yet implemented: SRP_I_LOGOUT\n"); + break; + case SRP_CRED_RSP: + pr_debug("received SRP_CRED_RSP\n"); + break; + case SRP_AER_RSP: + pr_debug("received SRP_AER_RSP\n"); + break; + case SRP_RSP: + pr_err("Received SRP_RSP\n"); + break; + default: + pr_err("received IU with unknown opcode 0x%x\n", opcode); + break; + } + + srpt_post_recv(ch->sport->sdev, ch, recv_ioctx); + res = true; + +out: + return res; + +push: + if (list_empty(&recv_ioctx->wait_list)) { + WARN_ON_ONCE(ch->processing_wait_list); + list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list); + } + goto out; +} + +static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct srpt_rdma_ch *ch = cq->cq_context; + struct srpt_recv_ioctx *ioctx = + container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe); + + if (wc->status == IB_WC_SUCCESS) { + int req_lim; + + req_lim = atomic_dec_return(&ch->req_lim); + if (unlikely(req_lim < 0)) + pr_err("req_lim = %d < 0\n", req_lim); + srpt_handle_new_iu(ch, ioctx); + } else { + pr_info_ratelimited("receiving failed for ioctx %p with status %d\n", + ioctx, wc->status); + } +} + +/* + * This function must be called from the context in which RDMA completions are + * processed because it accesses the wait list without protection against + * access from other threads. + */ +static void srpt_process_wait_list(struct srpt_rdma_ch *ch) +{ + struct srpt_recv_ioctx *recv_ioctx, *tmp; + + WARN_ON_ONCE(ch->state == CH_CONNECTING); + + if (list_empty(&ch->cmd_wait_list)) + return; + + WARN_ON_ONCE(ch->processing_wait_list); + ch->processing_wait_list = true; + list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list, + wait_list) { + if (!srpt_handle_new_iu(ch, recv_ioctx)) + break; + } + ch->processing_wait_list = false; +} + +/** + * srpt_send_done - send completion callback + * @cq: Completion queue. + * @wc: Work completion. + * + * Note: Although this has not yet been observed during tests, at least in + * theory it is possible that the srpt_get_send_ioctx() call invoked by + * srpt_handle_new_iu() fails. This is possible because the req_lim_delta + * value in each response is set to one, and it is possible that this response + * makes the initiator send a new request before the send completion for that + * response has been processed. This could e.g. happen if the call to + * srpt_put_send_iotcx() is delayed because of a higher priority interrupt or + * if IB retransmission causes generation of the send completion to be + * delayed. 
Incoming information units for which srpt_get_send_ioctx() fails + * are queued on cmd_wait_list. The code below processes these delayed + * requests one at a time. + */ +static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct srpt_rdma_ch *ch = cq->cq_context; + struct srpt_send_ioctx *ioctx = + container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe); + enum srpt_command_state state; + + state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); + + WARN_ON(state != SRPT_STATE_CMD_RSP_SENT && + state != SRPT_STATE_MGMT_RSP_SENT); + + atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail); + + if (wc->status != IB_WC_SUCCESS) + pr_info("sending response for ioctx 0x%p failed" + " with status %d\n", ioctx, wc->status); + + if (state != SRPT_STATE_DONE) { + transport_generic_free_cmd(&ioctx->cmd, 0); + } else { + pr_err("IB completion has been received too late for" + " wr_id = %u.\n", ioctx->ioctx.index); + } + + srpt_process_wait_list(ch); +} + +/** + * srpt_create_ch_ib - create receive and send completion queues + * @ch: SRPT RDMA channel. + */ +static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) +{ + struct ib_qp_init_attr *qp_init; + struct srpt_port *sport = ch->sport; + struct srpt_device *sdev = sport->sdev; + const struct ib_device_attr *attrs = &sdev->device->attrs; + int sq_size = sport->port_attrib.srp_sq_size; + int i, ret; + + WARN_ON(ch->rq_size < 1); + + ret = -ENOMEM; + qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL); + if (!qp_init) + goto out; + +retry: + ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size, + 0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE); + if (IS_ERR(ch->cq)) { + ret = PTR_ERR(ch->cq); + pr_err("failed to create CQ cqe= %d ret= %d\n", + ch->rq_size + sq_size, ret); + goto out; + } + + qp_init->qp_context = (void *)ch; + qp_init->event_handler + = (void(*)(struct ib_event *, void*))srpt_qp_event; + qp_init->send_cq = ch->cq; + qp_init->recv_cq = ch->cq; + qp_init->sq_sig_type = IB_SIGNAL_REQ_WR; + qp_init->qp_type = IB_QPT_RC; + /* + * We divide up our send queue size into half SEND WRs to send the + * completions, and half R/W contexts to actually do the RDMA + * READ/WRITE transfers. Note that we need to allocate CQ slots for + * both, as RDMA contexts will also post completions for the + * RDMA READ case.
+ */ + qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr); + qp_init->cap.max_rdma_ctxs = sq_size / 2; + qp_init->cap.max_send_sge = min(attrs->max_send_sge, + SRPT_MAX_SG_PER_WQE); + qp_init->port_num = ch->sport->port; + if (sdev->use_srq) { + qp_init->srq = sdev->srq; + } else { + qp_init->cap.max_recv_wr = ch->rq_size; + qp_init->cap.max_recv_sge = min(attrs->max_recv_sge, + SRPT_MAX_SG_PER_WQE); + } + + if (ch->using_rdma_cm) { + ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init); + ch->qp = ch->rdma_cm.cm_id->qp; + } else { + ch->qp = ib_create_qp(sdev->pd, qp_init); + if (!IS_ERR(ch->qp)) { + ret = srpt_init_ch_qp(ch, ch->qp); + if (ret) + ib_destroy_qp(ch->qp); + } else { + ret = PTR_ERR(ch->qp); + } + } + if (ret) { + bool retry = sq_size > MIN_SRPT_SQ_SIZE; + + if (retry) { + pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n", + sq_size, ret); + ib_free_cq(ch->cq); + sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE); + goto retry; + } else { + pr_err("failed to create queue pair with sq_size = %d (%d)\n", + sq_size, ret); + goto err_destroy_cq; + } + } + + atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr); + + pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n", + __func__, ch->cq->cqe, qp_init->cap.max_send_sge, + qp_init->cap.max_send_wr, ch); + + if (!sdev->use_srq) + for (i = 0; i < ch->rq_size; i++) + srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]); + +out: + kfree(qp_init); + return ret; + +err_destroy_cq: + ch->qp = NULL; + ib_free_cq(ch->cq); + goto out; +} + +static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) +{ + ib_destroy_qp(ch->qp); + ib_free_cq(ch->cq); +} + +/** + * srpt_close_ch - close a RDMA channel + * @ch: SRPT RDMA channel. + * + * Make sure all resources associated with the channel will be deallocated at + * an appropriate time. + * + * Returns true if and only if the channel state has been modified into + * CH_DRAINING. + */ +static bool srpt_close_ch(struct srpt_rdma_ch *ch) +{ + int ret; + + if (!srpt_set_ch_state(ch, CH_DRAINING)) { + pr_debug("%s: already closed\n", ch->sess_name); + return false; + } + + kref_get(&ch->kref); + + ret = srpt_ch_qp_err(ch); + if (ret < 0) + pr_err("%s-%d: changing queue pair into error state failed: %d\n", + ch->sess_name, ch->qp->qp_num, ret); + + ret = srpt_zerolength_write(ch); + if (ret < 0) { + pr_err("%s-%d: queuing zero-length write failed: %d\n", + ch->sess_name, ch->qp->qp_num, ret); + if (srpt_set_ch_state(ch, CH_DISCONNECTED)) + schedule_work(&ch->release_work); + else + WARN_ON_ONCE(true); + } + + kref_put(&ch->kref, srpt_free_ch); + + return true; +} + +/* + * Change the channel state into CH_DISCONNECTING. If a channel has not yet + * reached the connected state, close it. If a channel is in the connected + * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is + * the responsibility of the caller to ensure that this function is not + * invoked concurrently with the code that accepts a connection. This means + * that this function must either be invoked from inside a CM callback + * function or that it must be invoked with the srpt_port.mutex held. 
+ */ +static int srpt_disconnect_ch(struct srpt_rdma_ch *ch) +{ + int ret; + + if (!srpt_set_ch_state(ch, CH_DISCONNECTING)) + return -ENOTCONN; + + if (ch->using_rdma_cm) { + ret = rdma_disconnect(ch->rdma_cm.cm_id); + } else { + ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0); + if (ret < 0) + ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0); + } + + if (ret < 0 && srpt_close_ch(ch)) + ret = 0; + + return ret; +} + +static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch) +{ + struct srpt_nexus *nexus; + struct srpt_rdma_ch *ch2; + bool res = true; + + rcu_read_lock(); + list_for_each_entry(nexus, &sport->nexus_list, entry) { + list_for_each_entry(ch2, &nexus->ch_list, list) { + if (ch2 == ch) { + res = false; + goto done; + } + } + } +done: + rcu_read_unlock(); + + return res; +} + +/* Send DREQ and wait for DREP. */ +static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch) +{ + struct srpt_port *sport = ch->sport; + + pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num, + ch->state); + + mutex_lock(&sport->mutex); + srpt_disconnect_ch(ch); + mutex_unlock(&sport->mutex); + + while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch), + 5 * HZ) == 0) + pr_info("%s(%s-%d state %d): still waiting ...\n", __func__, + ch->sess_name, ch->qp->qp_num, ch->state); + +} + +static void __srpt_close_all_ch(struct srpt_port *sport) +{ + struct srpt_nexus *nexus; + struct srpt_rdma_ch *ch; + + lockdep_assert_held(&sport->mutex); + + list_for_each_entry(nexus, &sport->nexus_list, entry) { + list_for_each_entry(ch, &nexus->ch_list, list) { + if (srpt_disconnect_ch(ch) >= 0) + pr_info("Closing channel %s because target %s_%d has been disabled\n", + ch->sess_name, + sport->sdev->device->name, sport->port); + srpt_close_ch(ch); + } + } +} + +/* + * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if + * it does not yet exist. + */ +static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport, + const u8 i_port_id[16], + const u8 t_port_id[16]) +{ + struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n; + + for (;;) { + mutex_lock(&sport->mutex); + list_for_each_entry(n, &sport->nexus_list, entry) { + if (memcmp(n->i_port_id, i_port_id, 16) == 0 && + memcmp(n->t_port_id, t_port_id, 16) == 0) { + nexus = n; + break; + } + } + if (!nexus && tmp_nexus) { + list_add_tail_rcu(&tmp_nexus->entry, + &sport->nexus_list); + swap(nexus, tmp_nexus); + } + mutex_unlock(&sport->mutex); + + if (nexus) + break; + tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL); + if (!tmp_nexus) { + nexus = ERR_PTR(-ENOMEM); + break; + } + INIT_LIST_HEAD(&tmp_nexus->ch_list); + memcpy(tmp_nexus->i_port_id, i_port_id, 16); + memcpy(tmp_nexus->t_port_id, t_port_id, 16); + } + + kfree(tmp_nexus); + + return nexus; +} + +static void srpt_set_enabled(struct srpt_port *sport, bool enabled) + __must_hold(&sport->mutex) +{ + lockdep_assert_held(&sport->mutex); + + if (sport->enabled == enabled) + return; + sport->enabled = enabled; + if (!enabled) + __srpt_close_all_ch(sport); +} + +static void srpt_free_ch(struct kref *kref) +{ + struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref); + + kfree_rcu(ch, rcu); +} + +/* + * Shut down the SCSI target session, tell the connection manager to + * disconnect the associated RDMA channel, transition the QP to the error + * state and remove the channel from the channel list. This function is + * typically called from inside srpt_zerolength_write_done(). 
Concurrent + * srpt_zerolength_write() calls from inside srpt_close_ch() are possible + * as long as the channel is on sport->nexus_list. + */ +static void srpt_release_channel_work(struct work_struct *w) +{ + struct srpt_rdma_ch *ch; + struct srpt_device *sdev; + struct srpt_port *sport; + struct se_session *se_sess; + + ch = container_of(w, struct srpt_rdma_ch, release_work); + pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num); + + sdev = ch->sport->sdev; + BUG_ON(!sdev); + + se_sess = ch->sess; + BUG_ON(!se_sess); + + target_sess_cmd_list_set_waiting(se_sess); + target_wait_for_sess_cmds(se_sess); + + target_remove_session(se_sess); + ch->sess = NULL; + + if (ch->using_rdma_cm) + rdma_destroy_id(ch->rdma_cm.cm_id); + else + ib_destroy_cm_id(ch->ib_cm.cm_id); + + sport = ch->sport; + mutex_lock(&sport->mutex); + list_del_rcu(&ch->list); + mutex_unlock(&sport->mutex); + + srpt_destroy_ch_ib(ch); + + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, + ch->sport->sdev, ch->rq_size, + ch->max_rsp_size, DMA_TO_DEVICE); + + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring, + sdev, ch->rq_size, + srp_max_req_size, DMA_FROM_DEVICE); + + wake_up(&sport->ch_releaseQ); + + kref_put(&ch->kref, srpt_free_ch); +} + +/** + * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED + * @sdev: HCA through which the login request was received. + * @ib_cm_id: IB/CM connection identifier in case of IB/CM. + * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM. + * @port_num: Port through which the REQ message was received. + * @pkey: P_Key of the incoming connection. + * @req: SRP login request. + * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted + * the login request. + * + * Ownership of the cm_id is transferred to the target session if this + * function returns zero. Otherwise the caller remains the owner of cm_id. + */ +static int srpt_cm_req_recv(struct srpt_device *const sdev, + struct ib_cm_id *ib_cm_id, + struct rdma_cm_id *rdma_cm_id, + u8 port_num, __be16 pkey, + const struct srp_login_req *req, + const char *src_addr) +{ + struct srpt_port *sport = &sdev->port[port_num - 1]; + struct srpt_nexus *nexus; + struct srp_login_rsp *rsp = NULL; + struct srp_login_rej *rej = NULL; + union { + struct rdma_conn_param rdma_cm; + struct ib_cm_rep_param ib_cm; + } *rep_param = NULL; + struct srpt_rdma_ch *ch = NULL; + char i_port_id[36]; + u32 it_iu_len; + int i, ret; + + WARN_ON_ONCE(irqs_disabled()); + + if (WARN_ON(!sdev || !req)) + return -EINVAL; + + it_iu_len = be32_to_cpu(req->req_it_iu_len); + + pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n", + req->initiator_port_id, req->target_port_id, it_iu_len, + port_num, &sport->gid, be16_to_cpu(pkey)); + + nexus = srpt_get_nexus(sport, req->initiator_port_id, + req->target_port_id); + if (IS_ERR(nexus)) { + ret = PTR_ERR(nexus); + goto out; + } + + ret = -ENOMEM; + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); + rej = kzalloc(sizeof(*rej), GFP_KERNEL); + rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL); + if (!rsp || !rej || !rep_param) + goto out; + + ret = -EINVAL; + if (it_iu_len > srp_max_req_size || it_iu_len < 64) { + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); + pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. 
%d)\n", + it_iu_len, 64, srp_max_req_size); + goto reject; + } + + if (!sport->enabled) { + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n", + sport->sdev->device->name, port_num); + goto reject; + } + + if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) + || *(__be64 *)(req->target_port_id + 8) != + cpu_to_be64(srpt_service_guid)) { + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); + pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n"); + goto reject; + } + + ret = -ENOMEM; + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) { + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_err("rejected SRP_LOGIN_REQ because out of memory.\n"); + goto reject; + } + + kref_init(&ch->kref); + ch->pkey = be16_to_cpu(pkey); + ch->nexus = nexus; + ch->zw_cqe.done = srpt_zerolength_write_done; + INIT_WORK(&ch->release_work, srpt_release_channel_work); + ch->sport = sport; + if (ib_cm_id) { + ch->ib_cm.cm_id = ib_cm_id; + ib_cm_id->context = ch; + } else { + ch->using_rdma_cm = true; + ch->rdma_cm.cm_id = rdma_cm_id; + rdma_cm_id->context = ch; + } + /* + * ch->rq_size should be at least as large as the initiator queue + * depth to avoid that the initiator driver has to report QUEUE_FULL + * to the SCSI mid-layer. + */ + ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr); + spin_lock_init(&ch->spinlock); + ch->state = CH_CONNECTING; + INIT_LIST_HEAD(&ch->cmd_wait_list); + ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size; + + ch->ioctx_ring = (struct srpt_send_ioctx **) + srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, + sizeof(*ch->ioctx_ring[0]), + ch->max_rsp_size, DMA_TO_DEVICE); + if (!ch->ioctx_ring) { + pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n"); + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + goto free_ch; + } + + INIT_LIST_HEAD(&ch->free_list); + for (i = 0; i < ch->rq_size; i++) { + ch->ioctx_ring[i]->ch = ch; + list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list); + } + if (!sdev->use_srq) { + ch->ioctx_recv_ring = (struct srpt_recv_ioctx **) + srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, + sizeof(*ch->ioctx_recv_ring[0]), + srp_max_req_size, + DMA_FROM_DEVICE); + if (!ch->ioctx_recv_ring) { + pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n"); + rej->reason = + cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + goto free_ring; + } + for (i = 0; i < ch->rq_size; i++) + INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list); + } + + ret = srpt_create_ch_ib(ch); + if (ret) { + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n"); + goto free_recv_ring; + } + + strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name)); + snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx", + be64_to_cpu(*(__be64 *)nexus->i_port_id), + be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8))); + + pr_debug("registering session %s\n", ch->sess_name); + + if (sport->port_guid_tpg.se_tpg_wwn) + ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0, + TARGET_PROT_NORMAL, + ch->sess_name, ch, NULL); + if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) + ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0, + TARGET_PROT_NORMAL, i_port_id, ch, + NULL); + /* Retry without leading "0x" */ + if 
(sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) + ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0, + TARGET_PROT_NORMAL, + i_port_id + 2, ch, NULL); + if (IS_ERR_OR_NULL(ch->sess)) { + WARN_ON_ONCE(ch->sess == NULL); + ret = PTR_ERR(ch->sess); + ch->sess = NULL; + pr_info("Rejected login for initiator %s: ret = %d.\n", + ch->sess_name, ret); + rej->reason = cpu_to_be32(ret == -ENOMEM ? + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES : + SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); + goto destroy_ib; + } + + mutex_lock(&sport->mutex); + + if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) { + struct srpt_rdma_ch *ch2; + + rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN; + + list_for_each_entry(ch2, &nexus->ch_list, list) { + if (srpt_disconnect_ch(ch2) < 0) + continue; + pr_info("Relogin - closed existing channel %s\n", + ch2->sess_name); + rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED; + } + } else { + rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED; + } + + list_add_tail_rcu(&ch->list, &nexus->ch_list); + + if (!sport->enabled) { + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n", + sdev->device->name, port_num); + mutex_unlock(&sport->mutex); + ret = -EINVAL; + goto reject; + } + + mutex_unlock(&sport->mutex); + + ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp); + if (ret) { + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n", + ret); + goto reject; + } + + pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess, + ch->sess_name, ch); + + /* create srp_login_response */ + rsp->opcode = SRP_LOGIN_RSP; + rsp->tag = req->tag; + rsp->max_it_iu_len = req->req_it_iu_len; + rsp->max_ti_iu_len = req->req_it_iu_len; + ch->max_ti_iu_len = it_iu_len; + rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | + SRP_BUF_FORMAT_INDIRECT); + rsp->req_lim_delta = cpu_to_be32(ch->rq_size); + atomic_set(&ch->req_lim, ch->rq_size); + atomic_set(&ch->req_lim_delta, 0); + + /* create cm reply */ + if (ch->using_rdma_cm) { + rep_param->rdma_cm.private_data = (void *)rsp; + rep_param->rdma_cm.private_data_len = sizeof(*rsp); + rep_param->rdma_cm.rnr_retry_count = 7; + rep_param->rdma_cm.flow_control = 1; + rep_param->rdma_cm.responder_resources = 4; + rep_param->rdma_cm.initiator_depth = 4; + } else { + rep_param->ib_cm.qp_num = ch->qp->qp_num; + rep_param->ib_cm.private_data = (void *)rsp; + rep_param->ib_cm.private_data_len = sizeof(*rsp); + rep_param->ib_cm.rnr_retry_count = 7; + rep_param->ib_cm.flow_control = 1; + rep_param->ib_cm.failover_accepted = 0; + rep_param->ib_cm.srq = 1; + rep_param->ib_cm.responder_resources = 4; + rep_param->ib_cm.initiator_depth = 4; + } + + /* + * Hold the sport mutex while accepting a connection to avoid that + * srpt_disconnect_ch() is invoked concurrently with this code. 
+ */ + mutex_lock(&sport->mutex); + if (sport->enabled && ch->state == CH_CONNECTING) { + if (ch->using_rdma_cm) + ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm); + else + ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm); + } else { + ret = -EINVAL; + } + mutex_unlock(&sport->mutex); + + switch (ret) { + case 0: + break; + case -EINVAL: + goto reject; + default: + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n", + ret); + goto reject; + } + + goto out; + +destroy_ib: + srpt_destroy_ch_ib(ch); + +free_recv_ring: + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring, + ch->sport->sdev, ch->rq_size, + srp_max_req_size, DMA_FROM_DEVICE); + +free_ring: + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, + ch->sport->sdev, ch->rq_size, + ch->max_rsp_size, DMA_TO_DEVICE); + +free_ch: + if (rdma_cm_id) + rdma_cm_id->context = NULL; + else + ib_cm_id->context = NULL; + kfree(ch); + ch = NULL; + + WARN_ON_ONCE(ret == 0); + +reject: + pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason)); + rej->opcode = SRP_LOGIN_REJ; + rej->tag = req->tag; + rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | + SRP_BUF_FORMAT_INDIRECT); + + if (rdma_cm_id) + rdma_reject(rdma_cm_id, rej, sizeof(*rej)); + else + ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, + rej, sizeof(*rej)); + + if (ch && ch->sess) { + srpt_close_ch(ch); + /* + * Tell the caller not to free cm_id since + * srpt_release_channel_work() will do that. + */ + ret = 0; + } + +out: + kfree(rep_param); + kfree(rsp); + kfree(rej); + + return ret; +} + +static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id, + const struct ib_cm_req_event_param *param, + void *private_data) +{ + char sguid[40]; + + srpt_format_guid(sguid, sizeof(sguid), + ¶m->primary_path->dgid.global.interface_id); + + return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port, + param->primary_path->pkey, + private_data, sguid); +} + +static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event) +{ + struct srpt_device *sdev; + struct srp_login_req req; + const struct srp_login_req_rdma *req_rdma; + char src_addr[40]; + + sdev = ib_get_client_data(cm_id->device, &srpt_client); + if (!sdev) + return -ECONNREFUSED; + + if (event->param.conn.private_data_len < sizeof(*req_rdma)) + return -EINVAL; + + /* Transform srp_login_req_rdma into srp_login_req. */ + req_rdma = event->param.conn.private_data; + memset(&req, 0, sizeof(req)); + req.opcode = req_rdma->opcode; + req.tag = req_rdma->tag; + req.req_it_iu_len = req_rdma->req_it_iu_len; + req.req_buf_fmt = req_rdma->req_buf_fmt; + req.req_flags = req_rdma->req_flags; + memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16); + memcpy(req.target_port_id, req_rdma->target_port_id, 16); + + snprintf(src_addr, sizeof(src_addr), "%pIS", + &cm_id->route.addr.src_addr); + + return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num, + cm_id->route.path_rec->pkey, &req, src_addr); +} + +static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch, + enum ib_cm_rej_reason reason, + const u8 *private_data, + u8 private_data_len) +{ + char *priv = NULL; + int i; + + if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1, + GFP_KERNEL))) { + for (i = 0; i < private_data_len; i++) + sprintf(priv + 3 * i, " %02x", private_data[i]); + } + pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n", + ch->sess_name, ch->qp->qp_num, reason, private_data_len ? 
+ "; private data" : "", priv ? priv : " (?)"); + kfree(priv); +} + +/** + * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event + * @ch: SRPT RDMA channel. + * + * An RTU (ready to use) message indicates that the connection has been + * established and that the recipient may begin transmitting. + */ +static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch) +{ + int ret; + + ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp); + if (ret < 0) { + pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name, + ch->qp->qp_num); + srpt_close_ch(ch); + return; + } + + /* + * Note: calling srpt_close_ch() if the transition to the LIVE state + * fails is not necessary since that means that that function has + * already been invoked from another thread. + */ + if (!srpt_set_ch_state(ch, CH_LIVE)) { + pr_err("%s-%d: channel transition to LIVE state failed\n", + ch->sess_name, ch->qp->qp_num); + return; + } + + /* Trigger wait list processing. */ + ret = srpt_zerolength_write(ch); + WARN_ONCE(ret < 0, "%d\n", ret); +} + +/** + * srpt_cm_handler - IB connection manager callback function + * @cm_id: IB/CM connection identifier. + * @event: IB/CM event. + * + * A non-zero return value will cause the caller destroy the CM ID. + * + * Note: srpt_cm_handler() must only return a non-zero value when transferring + * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning + * a non-zero value in any other case will trigger a race with the + * ib_destroy_cm_id() call in srpt_release_channel(). + */ +static int srpt_cm_handler(struct ib_cm_id *cm_id, + const struct ib_cm_event *event) +{ + struct srpt_rdma_ch *ch = cm_id->context; + int ret; + + ret = 0; + switch (event->event) { + case IB_CM_REQ_RECEIVED: + ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd, + event->private_data); + break; + case IB_CM_REJ_RECEIVED: + srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason, + event->private_data, + IB_CM_REJ_PRIVATE_DATA_SIZE); + break; + case IB_CM_RTU_RECEIVED: + case IB_CM_USER_ESTABLISHED: + srpt_cm_rtu_recv(ch); + break; + case IB_CM_DREQ_RECEIVED: + srpt_disconnect_ch(ch); + break; + case IB_CM_DREP_RECEIVED: + pr_info("Received CM DREP message for ch %s-%d.\n", + ch->sess_name, ch->qp->qp_num); + srpt_close_ch(ch); + break; + case IB_CM_TIMEWAIT_EXIT: + pr_info("Received CM TimeWait exit for ch %s-%d.\n", + ch->sess_name, ch->qp->qp_num); + srpt_close_ch(ch); + break; + case IB_CM_REP_ERROR: + pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name, + ch->qp->qp_num); + break; + case IB_CM_DREQ_ERROR: + pr_info("Received CM DREQ ERROR event.\n"); + break; + case IB_CM_MRA_RECEIVED: + pr_info("Received CM MRA event\n"); + break; + default: + pr_err("received unrecognized CM event %d\n", event->event); + break; + } + + return ret; +} + +static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id, + struct rdma_cm_event *event) +{ + struct srpt_rdma_ch *ch = cm_id->context; + int ret = 0; + + switch (event->event) { + case RDMA_CM_EVENT_CONNECT_REQUEST: + ret = srpt_rdma_cm_req_recv(cm_id, event); + break; + case RDMA_CM_EVENT_REJECTED: + srpt_cm_rej_recv(ch, event->status, + event->param.conn.private_data, + event->param.conn.private_data_len); + break; + case RDMA_CM_EVENT_ESTABLISHED: + srpt_cm_rtu_recv(ch); + break; + case RDMA_CM_EVENT_DISCONNECTED: + if (ch->state < CH_DISCONNECTING) + srpt_disconnect_ch(ch); + else + srpt_close_ch(ch); + break; + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + srpt_close_ch(ch); + break; + case RDMA_CM_EVENT_UNREACHABLE: + 
pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name, + ch->qp->qp_num); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + case RDMA_CM_EVENT_ADDR_CHANGE: + break; + default: + pr_err("received unrecognized RDMA CM event %d\n", + event->event); + break; + } + + return ret; +} + +static int srpt_write_pending_status(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); + return ioctx->state == SRPT_STATE_NEED_DATA; +} + +/* + * srpt_write_pending - Start data transfer from initiator to target (write). + */ +static int srpt_write_pending(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx = + container_of(se_cmd, struct srpt_send_ioctx, cmd); + struct srpt_rdma_ch *ch = ioctx->ch; + struct ib_send_wr *first_wr = NULL; + struct ib_cqe *cqe = &ioctx->rdma_cqe; + enum srpt_command_state new_state; + int ret, i; + + new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA); + WARN_ON(new_state == SRPT_STATE_DONE); + + if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) { + pr_warn("%s: IB send queue full (needed %d)\n", + __func__, ioctx->n_rdma); + ret = -ENOMEM; + goto out_undo; + } + + cqe->done = srpt_rdma_read_done; + for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) { + struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i]; + + first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port, + cqe, first_wr); + cqe = NULL; + } + + ret = ib_post_send(ch->qp, first_wr, NULL); + if (ret) { + pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n", + __func__, ret, ioctx->n_rdma, + atomic_read(&ch->sq_wr_avail)); + goto out_undo; + } + + return 0; +out_undo: + atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); + return ret; +} + +static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) +{ + switch (tcm_mgmt_status) { + case TMR_FUNCTION_COMPLETE: + return SRP_TSK_MGMT_SUCCESS; + case TMR_FUNCTION_REJECTED: + return SRP_TSK_MGMT_FUNC_NOT_SUPP; + } + return SRP_TSK_MGMT_FAILED; +} + +/** + * srpt_queue_response - transmit the response to a SCSI command + * @cmd: SCSI target command. + * + * Callback function called by the TCM core. Must not block since it can be + * invoked on the context of the IB completion handler. + */ +static void srpt_queue_response(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx = + container_of(cmd, struct srpt_send_ioctx, cmd); + struct srpt_rdma_ch *ch = ioctx->ch; + struct srpt_device *sdev = ch->sport->sdev; + struct ib_send_wr send_wr, *first_wr = &send_wr; + struct ib_sge sge; + enum srpt_command_state state; + int resp_len, ret, i; + u8 srp_tm_status; + + BUG_ON(!ch); + + state = ioctx->state; + switch (state) { + case SRPT_STATE_NEW: + case SRPT_STATE_DATA_IN: + ioctx->state = SRPT_STATE_CMD_RSP_SENT; + break; + case SRPT_STATE_MGMT: + ioctx->state = SRPT_STATE_MGMT_RSP_SENT; + break; + default: + WARN(true, "ch %p; cmd %d: unexpected command state %d\n", + ch, ioctx->ioctx.index, ioctx->state); + break; + } + + if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) + return; + + /* For read commands, transfer the data to the initiator. 
*/ + if (ioctx->cmd.data_direction == DMA_FROM_DEVICE && + ioctx->cmd.data_length && + !ioctx->queue_status_only) { + for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) { + struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i]; + + first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, + ch->sport->port, NULL, first_wr); + } + } + + if (state != SRPT_STATE_MGMT) + resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag, + cmd->scsi_status); + else { + srp_tm_status + = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); + resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, + ioctx->cmd.tag); + } + + atomic_inc(&ch->req_lim); + + if (unlikely(atomic_sub_return(1 + ioctx->n_rdma, + &ch->sq_wr_avail) < 0)) { + pr_warn("%s: IB send queue full (needed %d)\n", + __func__, ioctx->n_rdma); + ret = -ENOMEM; + goto out; + } + + ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len, + DMA_TO_DEVICE); + + sge.addr = ioctx->ioctx.dma; + sge.length = resp_len; + sge.lkey = sdev->lkey; + + ioctx->ioctx.cqe.done = srpt_send_done; + send_wr.next = NULL; + send_wr.wr_cqe = &ioctx->ioctx.cqe; + send_wr.sg_list = &sge; + send_wr.num_sge = 1; + send_wr.opcode = IB_WR_SEND; + send_wr.send_flags = IB_SEND_SIGNALED; + + ret = ib_post_send(ch->qp, first_wr, NULL); + if (ret < 0) { + pr_err("%s: sending cmd response failed for tag %llu (%d)\n", + __func__, ioctx->cmd.tag, ret); + goto out; + } + + return; + +out: + atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail); + atomic_dec(&ch->req_lim); + srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); + target_put_sess_cmd(&ioctx->cmd); +} + +static int srpt_queue_data_in(struct se_cmd *cmd) +{ + srpt_queue_response(cmd); + return 0; +} + +static void srpt_queue_tm_rsp(struct se_cmd *cmd) +{ + srpt_queue_response(cmd); +} + +/* + * This function is called for aborted commands if no response is sent to the + * initiator. Make sure that the credits freed by aborting a command are + * returned to the initiator the next time a response is sent by incrementing + * ch->req_lim_delta. + */ +static void srpt_aborted_task(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx = container_of(cmd, + struct srpt_send_ioctx, cmd); + struct srpt_rdma_ch *ch = ioctx->ch; + + atomic_inc(&ch->req_lim_delta); +} + +static int srpt_queue_status(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); + BUG_ON(ioctx->sense_data != cmd->sense_buffer); + if (cmd->se_cmd_flags & + (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE)) + WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION); + ioctx->queue_status_only = true; + srpt_queue_response(cmd); + return 0; +} + +static void srpt_refresh_port_work(struct work_struct *work) +{ + struct srpt_port *sport = container_of(work, struct srpt_port, work); + + srpt_refresh_port(sport); +} + +static bool srpt_ch_list_empty(struct srpt_port *sport) +{ + struct srpt_nexus *nexus; + bool res = true; + + rcu_read_lock(); + list_for_each_entry(nexus, &sport->nexus_list, entry) + if (!list_empty(&nexus->ch_list)) + res = false; + rcu_read_unlock(); + + return res; +} + +/** + * srpt_release_sport - disable login and wait for associated channels + * @sport: SRPT HCA port. 
+ */ +static int srpt_release_sport(struct srpt_port *sport) +{ + struct srpt_nexus *nexus, *next_n; + struct srpt_rdma_ch *ch; + + WARN_ON_ONCE(irqs_disabled()); + + mutex_lock(&sport->mutex); + srpt_set_enabled(sport, false); + mutex_unlock(&sport->mutex); + + while (wait_event_timeout(sport->ch_releaseQ, + srpt_ch_list_empty(sport), 5 * HZ) <= 0) { + pr_info("%s_%d: waiting for session unregistration ...\n", + sport->sdev->device->name, sport->port); + rcu_read_lock(); + list_for_each_entry(nexus, &sport->nexus_list, entry) { + list_for_each_entry(ch, &nexus->ch_list, list) { + pr_info("%s-%d: state %s\n", + ch->sess_name, ch->qp->qp_num, + get_ch_state_name(ch->state)); + } + } + rcu_read_unlock(); + } + + mutex_lock(&sport->mutex); + list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) { + list_del(&nexus->entry); + kfree_rcu(nexus, rcu); + } + mutex_unlock(&sport->mutex); + + return 0; +} + +static struct se_wwn *__srpt_lookup_wwn(const char *name) +{ + struct ib_device *dev; + struct srpt_device *sdev; + struct srpt_port *sport; + int i; + + list_for_each_entry(sdev, &srpt_dev_list, list) { + dev = sdev->device; + if (!dev) + continue; + + for (i = 0; i < dev->phys_port_cnt; i++) { + sport = &sdev->port[i]; + + if (strcmp(sport->port_guid, name) == 0) + return &sport->port_guid_wwn; + if (strcmp(sport->port_gid, name) == 0) + return &sport->port_gid_wwn; + } + } + + return NULL; +} + +static struct se_wwn *srpt_lookup_wwn(const char *name) +{ + struct se_wwn *wwn; + + spin_lock(&srpt_dev_lock); + wwn = __srpt_lookup_wwn(name); + spin_unlock(&srpt_dev_lock); + + return wwn; +} + +static void srpt_free_srq(struct srpt_device *sdev) +{ + if (!sdev->srq) + return; + + ib_destroy_srq(sdev->srq); + srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, + sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE); + sdev->srq = NULL; +} + +static int srpt_alloc_srq(struct srpt_device *sdev) +{ + struct ib_srq_init_attr srq_attr = { + .event_handler = srpt_srq_event, + .srq_context = (void *)sdev, + .attr.max_wr = sdev->srq_size, + .attr.max_sge = 1, + .srq_type = IB_SRQT_BASIC, + }; + struct ib_device *device = sdev->device; + struct ib_srq *srq; + int i; + + WARN_ON_ONCE(sdev->srq); + srq = ib_create_srq(sdev->pd, &srq_attr); + if (IS_ERR(srq)) { + pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq)); + return PTR_ERR(srq); + } + + pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size, + sdev->device->attrs.max_srq_wr, device->name); + + sdev->ioctx_ring = (struct srpt_recv_ioctx **) + srpt_alloc_ioctx_ring(sdev, sdev->srq_size, + sizeof(*sdev->ioctx_ring[0]), + srp_max_req_size, DMA_FROM_DEVICE); + if (!sdev->ioctx_ring) { + ib_destroy_srq(srq); + return -ENOMEM; + } + + sdev->use_srq = true; + sdev->srq = srq; + + for (i = 0; i < sdev->srq_size; ++i) { + INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list); + srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]); + } + + return 0; +} + +static int srpt_use_srq(struct srpt_device *sdev, bool use_srq) +{ + struct ib_device *device = sdev->device; + int ret = 0; + + if (!use_srq) { + srpt_free_srq(sdev); + sdev->use_srq = false; + } else if (use_srq && !sdev->srq) { + ret = srpt_alloc_srq(sdev); + } + pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name, + sdev->use_srq, ret); + return ret; +} + +/** + * srpt_add_one - InfiniBand device addition callback function + * @device: Describes a HCA. 
+ */ +static void srpt_add_one(struct ib_device *device) +{ + struct srpt_device *sdev; + struct srpt_port *sport; + int i, ret; + + pr_debug("device = %p\n", device); + + sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt), + GFP_KERNEL); + if (!sdev) + goto err; + + sdev->device = device; + mutex_init(&sdev->sdev_mutex); + + sdev->pd = ib_alloc_pd(device, 0); + if (IS_ERR(sdev->pd)) + goto free_dev; + + sdev->lkey = sdev->pd->local_dma_lkey; + + sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr); + + srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq); + + if (!srpt_service_guid) + srpt_service_guid = be64_to_cpu(device->node_guid); + + if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND) + sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev); + if (IS_ERR(sdev->cm_id)) { + pr_info("ib_create_cm_id() failed: %ld\n", + PTR_ERR(sdev->cm_id)); + sdev->cm_id = NULL; + if (!rdma_cm_id) + goto err_ring; + } + + /* print out target login information */ + pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx," + "pkey=ffff,service_id=%016llx\n", srpt_service_guid, + srpt_service_guid, srpt_service_guid); + + /* + * We do not have a consistent service_id (ie. also id_ext of target_id) + * to identify this target. We currently use the guid of the first HCA + * in the system as service_id; therefore, the target_id will change + * if this HCA is gone bad and replaced by different HCA + */ + ret = sdev->cm_id ? + ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) : + 0; + if (ret < 0) { + pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret, + sdev->cm_id->state); + goto err_cm; + } + + INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, + srpt_event_handler); + ib_register_event_handler(&sdev->event_handler); + + for (i = 1; i <= sdev->device->phys_port_cnt; i++) { + sport = &sdev->port[i - 1]; + INIT_LIST_HEAD(&sport->nexus_list); + init_waitqueue_head(&sport->ch_releaseQ); + mutex_init(&sport->mutex); + sport->sdev = sdev; + sport->port = i; + sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE; + sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE; + sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE; + sport->port_attrib.use_srq = false; + INIT_WORK(&sport->work, srpt_refresh_port_work); + + if (srpt_refresh_port(sport)) { + pr_err("MAD registration failed for %s-%d.\n", + sdev->device->name, i); + goto err_event; + } + } + + spin_lock(&srpt_dev_lock); + list_add_tail(&sdev->list, &srpt_dev_list); + spin_unlock(&srpt_dev_lock); + +out: + ib_set_client_data(device, &srpt_client, sdev); + pr_debug("added %s.\n", device->name); + return; + +err_event: + ib_unregister_event_handler(&sdev->event_handler); +err_cm: + if (sdev->cm_id) + ib_destroy_cm_id(sdev->cm_id); +err_ring: + srpt_free_srq(sdev); + ib_dealloc_pd(sdev->pd); +free_dev: + kfree(sdev); +err: + sdev = NULL; + pr_info("%s(%s) failed.\n", __func__, device->name); + goto out; +} + +/** + * srpt_remove_one - InfiniBand device removal callback function + * @device: Describes a HCA. + * @client_data: The value passed as the third argument to ib_set_client_data(). 
+ */ +static void srpt_remove_one(struct ib_device *device, void *client_data) +{ + struct srpt_device *sdev = client_data; + int i; + + if (!sdev) { + pr_info("%s(%s): nothing to do.\n", __func__, device->name); + return; + } + + srpt_unregister_mad_agent(sdev); + + ib_unregister_event_handler(&sdev->event_handler); + + /* Cancel any work queued by the just unregistered IB event handler. */ + for (i = 0; i < sdev->device->phys_port_cnt; i++) + cancel_work_sync(&sdev->port[i].work); + + if (sdev->cm_id) + ib_destroy_cm_id(sdev->cm_id); + + ib_set_client_data(device, &srpt_client, NULL); + + /* + * Unregistering a target must happen after destroying sdev->cm_id + * such that no new SRP_LOGIN_REQ information units can arrive while + * destroying the target. + */ + spin_lock(&srpt_dev_lock); + list_del(&sdev->list); + spin_unlock(&srpt_dev_lock); + + for (i = 0; i < sdev->device->phys_port_cnt; i++) + srpt_release_sport(&sdev->port[i]); + + srpt_free_srq(sdev); + + ib_dealloc_pd(sdev->pd); + + kfree(sdev); +} + +static struct ib_client srpt_client = { + .name = DRV_NAME, + .add = srpt_add_one, + .remove = srpt_remove_one +}; + +static int srpt_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int srpt_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *srpt_get_fabric_name(void) +{ + return "srpt"; +} + +static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg) +{ + return tpg->se_tpg_wwn->priv; +} + +static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) +{ + struct srpt_port *sport = srpt_tpg_to_sport(tpg); + + WARN_ON_ONCE(tpg != &sport->port_guid_tpg && + tpg != &sport->port_gid_tpg); + return tpg == &sport->port_guid_tpg ? sport->port_guid : + sport->port_gid; +} + +static u16 srpt_get_tag(struct se_portal_group *tpg) +{ + return 1; +} + +static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static void srpt_release_cmd(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx = container_of(se_cmd, + struct srpt_send_ioctx, cmd); + struct srpt_rdma_ch *ch = ioctx->ch; + unsigned long flags; + + WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE && + !(ioctx->cmd.transport_state & CMD_T_ABORTED)); + + if (ioctx->n_rw_ctx) { + srpt_free_rw_ctxs(ch, ioctx); + ioctx->n_rw_ctx = 0; + } + + spin_lock_irqsave(&ch->spinlock, flags); + list_add(&ioctx->free_list, &ch->free_list); + spin_unlock_irqrestore(&ch->spinlock, flags); +} + +/** + * srpt_close_session - forcibly close a session + * @se_sess: SCSI target session. + * + * Callback function invoked by the TCM core to clean up sessions associated + * with a node ACL when the user invokes + * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id + */ +static void srpt_close_session(struct se_session *se_sess) +{ + struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; + + srpt_disconnect_ch_sync(ch); +} + +/** + * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB) + * @se_sess: SCSI target session. + * + * A quote from RFC 4455 (SCSI-MIB) about this MIB object: + * This object represents an arbitrary integer used to uniquely identify a + * particular attached remote initiator port to a particular SCSI target port + * within a particular SCSI target device within a particular SCSI instance. + */ +static u32 srpt_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static void srpt_set_default_node_attrs(struct se_node_acl *nacl) +{ +} + +/* Note: only used from inside debug printk's by the TCM core. 
*/ +static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); + return ioctx->state; +} + +static int srpt_parse_guid(u64 *guid, const char *name) +{ + u16 w[4]; + int ret = -EINVAL; + + if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4) + goto out; + *guid = get_unaligned_be64(w); + ret = 0; +out: + return ret; +} + +/** + * srpt_parse_i_port_id - parse an initiator port ID + * @name: ASCII representation of a 128-bit initiator port ID. + * @i_port_id: Binary 128-bit port ID. + */ +static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) +{ + const char *p; + unsigned len, count, leading_zero_bytes; + int ret; + + p = name; + if (strncasecmp(p, "0x", 2) == 0) + p += 2; + ret = -EINVAL; + len = strlen(p); + if (len % 2) + goto out; + count = min(len / 2, 16U); + leading_zero_bytes = 16 - count; + memset(i_port_id, 0, leading_zero_bytes); + ret = hex2bin(i_port_id + leading_zero_bytes, p, count); + +out: + return ret; +} + +/* + * configfs callback function invoked for mkdir + * /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id + * + * i_port_id must be an initiator port GUID, GID or IP address. See also the + * target_alloc_session() calls in this driver. Examples of valid initiator + * port IDs: + * 0x0000000000000000505400fffe4a0b7b + * 0000000000000000505400fffe4a0b7b + * 5054:00ff:fe4a:0b7b + * 192.168.122.76 + */ +static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name) +{ + struct sockaddr_storage sa; + u64 guid; + u8 i_port_id[16]; + int ret; + + ret = srpt_parse_guid(&guid, name); + if (ret < 0) + ret = srpt_parse_i_port_id(i_port_id, name); + if (ret < 0) + ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL, + &sa); + if (ret < 0) + pr_err("invalid initiator port ID %s\n", name); + return ret; +} + +static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + + return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size); +} + +static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + unsigned long val; + int ret; + + ret = kstrtoul(page, 0, &val); + if (ret < 0) { + pr_err("kstrtoul() failed with ret: %d\n", ret); + return -EINVAL; + } + if (val > MAX_SRPT_RDMA_SIZE) { + pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val, + MAX_SRPT_RDMA_SIZE); + return -EINVAL; + } + if (val < DEFAULT_MAX_RDMA_SIZE) { + pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n", + val, DEFAULT_MAX_RDMA_SIZE); + return -EINVAL; + } + sport->port_attrib.srp_max_rdma_size = val; + + return count; +} + +static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + + return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size); +} + +static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + unsigned long val; + int ret; + + ret = kstrtoul(page, 0, &val); + if (ret < 0) { + pr_err("kstrtoul() 
failed with ret: %d\n", ret); + return -EINVAL; + } + if (val > MAX_SRPT_RSP_SIZE) { + pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val, + MAX_SRPT_RSP_SIZE); + return -EINVAL; + } + if (val < MIN_MAX_RSP_SIZE) { + pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val, + MIN_MAX_RSP_SIZE); + return -EINVAL; + } + sport->port_attrib.srp_max_rsp_size = val; + + return count; +} + +static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + + return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size); +} + +static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + unsigned long val; + int ret; + + ret = kstrtoul(page, 0, &val); + if (ret < 0) { + pr_err("kstrtoul() failed with ret: %d\n", ret); + return -EINVAL; + } + if (val > MAX_SRPT_SRQ_SIZE) { + pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val, + MAX_SRPT_SRQ_SIZE); + return -EINVAL; + } + if (val < MIN_SRPT_SRQ_SIZE) { + pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val, + MIN_SRPT_SRQ_SIZE); + return -EINVAL; + } + sport->port_attrib.srp_sq_size = val; + + return count; +} + +static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + + return sprintf(page, "%d\n", sport->port_attrib.use_srq); +} + +static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = attrib_to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + struct srpt_device *sdev = sport->sdev; + unsigned long val; + bool enabled; + int ret; + + ret = kstrtoul(page, 0, &val); + if (ret < 0) + return ret; + if (val != !!val) + return -EINVAL; + + ret = mutex_lock_interruptible(&sdev->sdev_mutex); + if (ret < 0) + return ret; + ret = mutex_lock_interruptible(&sport->mutex); + if (ret < 0) + goto unlock_sdev; + enabled = sport->enabled; + /* Log out all initiator systems before changing 'use_srq'. 
*/ + srpt_set_enabled(sport, false); + sport->port_attrib.use_srq = val; + srpt_use_srq(sdev, sport->port_attrib.use_srq); + srpt_set_enabled(sport, enabled); + ret = count; + mutex_unlock(&sport->mutex); +unlock_sdev: + mutex_unlock(&sdev->sdev_mutex); + + return ret; +} + +CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size); +CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size); +CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size); +CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq); + +static struct configfs_attribute *srpt_tpg_attrib_attrs[] = { + &srpt_tpg_attrib_attr_srp_max_rdma_size, + &srpt_tpg_attrib_attr_srp_max_rsp_size, + &srpt_tpg_attrib_attr_srp_sq_size, + &srpt_tpg_attrib_attr_use_srq, + NULL, +}; + +static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr) +{ + struct rdma_cm_id *rdma_cm_id; + int ret; + + rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler, + NULL, RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(rdma_cm_id)) { + pr_err("RDMA/CM ID creation failed: %ld\n", + PTR_ERR(rdma_cm_id)); + goto out; + } + + ret = rdma_bind_addr(rdma_cm_id, listen_addr); + if (ret) { + char addr_str[64]; + + snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr); + pr_err("Binding RDMA/CM ID to address %s failed: %d\n", + addr_str, ret); + rdma_destroy_id(rdma_cm_id); + rdma_cm_id = ERR_PTR(ret); + goto out; + } + + ret = rdma_listen(rdma_cm_id, 128); + if (ret) { + pr_err("rdma_listen() failed: %d\n", ret); + rdma_destroy_id(rdma_cm_id); + rdma_cm_id = ERR_PTR(ret); + } + +out: + return rdma_cm_id; +} + +static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page) +{ + return sprintf(page, "%d\n", rdma_cm_port); +} + +static ssize_t srpt_rdma_cm_port_store(struct config_item *item, + const char *page, size_t count) +{ + struct sockaddr_in addr4 = { .sin_family = AF_INET }; + struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 }; + struct rdma_cm_id *new_id = NULL; + u16 val; + int ret; + + ret = kstrtou16(page, 0, &val); + if (ret < 0) + return ret; + ret = count; + if (rdma_cm_port == val) + goto out; + + if (val) { + addr6.sin6_port = cpu_to_be16(val); + new_id = srpt_create_rdma_id((struct sockaddr *)&addr6); + if (IS_ERR(new_id)) { + addr4.sin_port = cpu_to_be16(val); + new_id = srpt_create_rdma_id((struct sockaddr *)&addr4); + if (IS_ERR(new_id)) { + ret = PTR_ERR(new_id); + goto out; + } + } + } + + mutex_lock(&rdma_cm_mutex); + rdma_cm_port = val; + swap(rdma_cm_id, new_id); + mutex_unlock(&rdma_cm_mutex); + + if (new_id) + rdma_destroy_id(new_id); + ret = count; +out: + return ret; +} + +CONFIGFS_ATTR(srpt_, rdma_cm_port); + +static struct configfs_attribute *srpt_da_attrs[] = { + &srpt_attr_rdma_cm_port, + NULL, +}; + +static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + + return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 
1: 0); +} + +static ssize_t srpt_tpg_enable_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); + unsigned long tmp; + int ret; + + ret = kstrtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract srpt_tpg_store_enable\n"); + return -EINVAL; + } + + if ((tmp != 0) && (tmp != 1)) { + pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp); + return -EINVAL; + } + + mutex_lock(&sport->mutex); + srpt_set_enabled(sport, tmp); + mutex_unlock(&sport->mutex); + + return count; +} + +CONFIGFS_ATTR(srpt_tpg_, enable); + +static struct configfs_attribute *srpt_tpg_attrs[] = { + &srpt_tpg_attr_enable, + NULL, +}; + +/** + * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg + * @wwn: Corresponds to $driver/$port. + * @name: $tpg. + */ +static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, + const char *name) +{ + struct srpt_port *sport = wwn->priv; + static struct se_portal_group *tpg; + int res; + + WARN_ON_ONCE(wwn != &sport->port_guid_wwn && + wwn != &sport->port_gid_wwn); + tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg : + &sport->port_gid_tpg; + res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP); + if (res) + return ERR_PTR(res); + + return tpg; +} + +/** + * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg + * @tpg: Target portal group to deregister. + */ +static void srpt_drop_tpg(struct se_portal_group *tpg) +{ + struct srpt_port *sport = srpt_tpg_to_sport(tpg); + + sport->enabled = false; + core_tpg_deregister(tpg); +} + +/** + * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port + * @tf: Not used. + * @group: Not used. + * @name: $port. + */ +static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL); +} + +/** + * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port + * @wwn: $port. 
+ */ +static void srpt_drop_tport(struct se_wwn *wwn) +{ +} + +static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); +} + +CONFIGFS_ATTR_RO(srpt_wwn_, version); + +static struct configfs_attribute *srpt_wwn_attrs[] = { + &srpt_wwn_attr_version, + NULL, +}; + +static const struct target_core_fabric_ops srpt_template = { + .module = THIS_MODULE, + .name = "srpt", + .get_fabric_name = srpt_get_fabric_name, + .tpg_get_wwn = srpt_get_fabric_wwn, + .tpg_get_tag = srpt_get_tag, + .tpg_check_demo_mode = srpt_check_false, + .tpg_check_demo_mode_cache = srpt_check_true, + .tpg_check_demo_mode_write_protect = srpt_check_true, + .tpg_check_prod_mode_write_protect = srpt_check_false, + .tpg_get_inst_index = srpt_tpg_get_inst_index, + .release_cmd = srpt_release_cmd, + .check_stop_free = srpt_check_stop_free, + .close_session = srpt_close_session, + .sess_get_index = srpt_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = srpt_write_pending, + .write_pending_status = srpt_write_pending_status, + .set_default_node_attributes = srpt_set_default_node_attrs, + .get_cmd_state = srpt_get_tcm_cmd_state, + .queue_data_in = srpt_queue_data_in, + .queue_status = srpt_queue_status, + .queue_tm_rsp = srpt_queue_tm_rsp, + .aborted_task = srpt_aborted_task, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = srpt_make_tport, + .fabric_drop_wwn = srpt_drop_tport, + .fabric_make_tpg = srpt_make_tpg, + .fabric_drop_tpg = srpt_drop_tpg, + .fabric_init_nodeacl = srpt_init_nodeacl, + + .tfc_discovery_attrs = srpt_da_attrs, + .tfc_wwn_attrs = srpt_wwn_attrs, + .tfc_tpg_base_attrs = srpt_tpg_attrs, + .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, +}; + +/** + * srpt_init_module - kernel module initialization + * + * Note: Since ib_register_client() registers callback functions, and since at + * least one of these callback functions (srpt_add_one()) calls target core + * functions, this driver must be registered with the target core before + * ib_register_client() is called. + */ +static int __init srpt_init_module(void) +{ + int ret; + + ret = -EINVAL; + if (srp_max_req_size < MIN_MAX_REQ_SIZE) { + pr_err("invalid value %d for kernel module parameter" + " srp_max_req_size -- must be at least %d.\n", + srp_max_req_size, MIN_MAX_REQ_SIZE); + goto out; + } + + if (srpt_srq_size < MIN_SRPT_SRQ_SIZE + || srpt_srq_size > MAX_SRPT_SRQ_SIZE) { + pr_err("invalid value %d for kernel module parameter" + " srpt_srq_size -- must be in the range [%d..%d].\n", + srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE); + goto out; + } + + ret = target_register_template(&srpt_template); + if (ret) + goto out; + + ret = ib_register_client(&srpt_client); + if (ret) { + pr_err("couldn't register IB client\n"); + goto out_unregister_target; + } + + return 0; + +out_unregister_target: + target_unregister_template(&srpt_template); +out: + return ret; +} + +static void __exit srpt_cleanup_module(void) +{ + if (rdma_cm_id) + rdma_destroy_id(rdma_cm_id); + ib_unregister_client(&srpt_client); + target_unregister_template(&srpt_template); +} + +module_init(srpt_init_module); +module_exit(srpt_cleanup_module); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h new file mode 100644 index 000000000..444dfd728 --- /dev/null +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. 
+ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef IB_SRPT_H +#define IB_SRPT_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/wait.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_sa.h> +#include <rdma/ib_cm.h> +#include <rdma/rdma_cm.h> +#include <rdma/rw.h> + +#include <scsi/srp.h> + +#include "ib_dm_mad.h" + +/* + * The prefix the ServiceName field must start with in the device management + * ServiceEntries attribute pair. See also the SRP specification. + */ +#define SRP_SERVICE_NAME_PREFIX "SRP.T10:" + +struct srpt_nexus; + +enum { + /* + * SRP IOControllerProfile attributes for SRP target ports that have + * not been defined in <scsi/srp.h>. Source: section B.7, table B.7 + * in the SRP specification. + */ + SRP_PROTOCOL = 0x0108, + SRP_PROTOCOL_VERSION = 0x0001, + SRP_IO_SUBCLASS = 0x609e, + SRP_SEND_TO_IOC = 0x01, + SRP_SEND_FROM_IOC = 0x02, + SRP_RDMA_READ_FROM_IOC = 0x08, + SRP_RDMA_WRITE_FROM_IOC = 0x20, + + /* + * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP + * specification. + */ + SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */ + SRP_LOSOLNT = 0x10, /* logout solicited notification */ + SRP_CRSOLNT = 0x20, /* credit request solicited notification */ + SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */ + + /* + * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables + * 18 and 20 in the SRP specification. + */ + SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */ + SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */ + + /* + * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables + * 16 and 22 in the SRP specification. + */ + SRP_SOLNT = 0x01, /* SOLNT = solicited notification */ + + /* See also table 24 in the SRP specification. */ + SRP_TSK_MGMT_SUCCESS = 0x00, + SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04, + SRP_TSK_MGMT_FAILED = 0x05, + + /* See also table 21 in the SRP specification. 
*/ + SRP_CMD_SIMPLE_Q = 0x0, + SRP_CMD_HEAD_OF_Q = 0x1, + SRP_CMD_ORDERED_Q = 0x2, + SRP_CMD_ACA = 0x4, + + SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0, + SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1, + SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, + + SRPT_DEF_SG_TABLESIZE = 128, + /* + * An experimentally determined value that avoids that QP creation + * fails due to "swiotlb buffer is full" on systems using the swiotlb. + */ + SRPT_MAX_SG_PER_WQE = 16, + + MIN_SRPT_SQ_SIZE = 16, + DEF_SRPT_SQ_SIZE = 4096, + MAX_SRPT_RQ_SIZE = 128, + MIN_SRPT_SRQ_SIZE = 4, + DEFAULT_SRPT_SRQ_SIZE = 4095, + MAX_SRPT_SRQ_SIZE = 65535, + MAX_SRPT_RDMA_SIZE = 1U << 24, + MAX_SRPT_RSP_SIZE = 1024, + + MIN_MAX_REQ_SIZE = 996, + DEFAULT_MAX_REQ_SIZE + = sizeof(struct srp_cmd)/*48*/ + + sizeof(struct srp_indirect_buf)/*20*/ + + 128 * sizeof(struct srp_direct_buf)/*16*/, + + MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4, + DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */ + + DEFAULT_MAX_RDMA_SIZE = 65536, +}; + +/** + * enum srpt_command_state - SCSI command state managed by SRPT + * @SRPT_STATE_NEW: New command arrived and is being processed. + * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting + * for data arrival. + * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is + * being processed. + * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent. + * @SRPT_STATE_MGMT: Processing a SCSI task management command. + * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent. + * @SRPT_STATE_DONE: Command processing finished successfully, command + * processing has been aborted or command processing + * failed. + */ +enum srpt_command_state { + SRPT_STATE_NEW = 0, + SRPT_STATE_NEED_DATA = 1, + SRPT_STATE_DATA_IN = 2, + SRPT_STATE_CMD_RSP_SENT = 3, + SRPT_STATE_MGMT = 4, + SRPT_STATE_MGMT_RSP_SENT = 5, + SRPT_STATE_DONE = 6, +}; + +/** + * struct srpt_ioctx - shared SRPT I/O context information + * @cqe: Completion queue element. + * @buf: Pointer to the buffer. + * @dma: DMA address of the buffer. + * @index: Index of the I/O context in its ioctx_ring array. + */ +struct srpt_ioctx { + struct ib_cqe cqe; + void *buf; + dma_addr_t dma; + uint32_t index; +}; + +/** + * struct srpt_recv_ioctx - SRPT receive I/O context + * @ioctx: See above. + * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list. + */ +struct srpt_recv_ioctx { + struct srpt_ioctx ioctx; + struct list_head wait_list; +}; + +struct srpt_rw_ctx { + struct rdma_rw_ctx rw; + struct scatterlist *sg; + unsigned int nents; +}; + +/** + * struct srpt_send_ioctx - SRPT send I/O context + * @ioctx: See above. + * @ch: Channel pointer. + * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed. + * @rw_ctxs: RDMA read/write contexts. + * @rdma_cqe: RDMA completion queue element. + * @free_list: Node in srpt_rdma_ch.free_list. + * @state: I/O context state. + * @cmd: Target core command data structure. + * @sense_data: SCSI sense data. + * @n_rdma: Number of work requests needed to transfer this ioctx. + * @n_rw_ctx: Size of rw_ctxs array. + * @queue_status_only: Send a SCSI status back to the initiator but no data. + * @sense_data: Sense data to be sent to the initiator. 
+ */ +struct srpt_send_ioctx { + struct srpt_ioctx ioctx; + struct srpt_rdma_ch *ch; + + struct srpt_rw_ctx s_rw_ctx; + struct srpt_rw_ctx *rw_ctxs; + + struct ib_cqe rdma_cqe; + struct list_head free_list; + enum srpt_command_state state; + struct se_cmd cmd; + u8 n_rdma; + u8 n_rw_ctx; + bool queue_status_only; + u8 sense_data[TRANSPORT_SENSE_BUFFER]; +}; + +/** + * enum rdma_ch_state - SRP channel state + * @CH_CONNECTING: QP is in RTR state; waiting for RTU. + * @CH_LIVE: QP is in RTS state. + * @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has + * been received. + * @CH_DRAINING: DREP has been received or waiting for DREP timed out + * and last work request has been queued. + * @CH_DISCONNECTED: Last completion has been received. + */ +enum rdma_ch_state { + CH_CONNECTING, + CH_LIVE, + CH_DISCONNECTING, + CH_DRAINING, + CH_DISCONNECTED, +}; + +/** + * struct srpt_rdma_ch - RDMA channel + * @nexus: I_T nexus this channel is associated with. + * @qp: IB queue pair used for communicating over this channel. + * @cm_id: IB CM ID associated with the channel. + * @cq: IB completion queue for this channel. + * @zw_cqe: Zero-length write CQE. + * @rcu: RCU head. + * @kref: kref for this channel. + * @rq_size: IB receive queue size. + * @max_rsp_size: Maximum size of an RSP response message in bytes. + * @sq_wr_avail: number of work requests available in the send queue. + * @sport: pointer to the information of the HCA port used by this + * channel. + * @max_ti_iu_len: maximum target-to-initiator information unit length. + * @req_lim: request limit: maximum number of requests that may be sent + * by the initiator without having received a response. + * @req_lim_delta: Number of credits not yet sent back to the initiator. + * @spinlock: Protects free_list and state. + * @free_list: Head of list with free send I/O contexts. + * @state: channel state. See also enum rdma_ch_state. + * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel. + * @processing_wait_list: Whether or not cmd_wait_list is being processed. + * @ioctx_ring: Send ring. + * @ioctx_recv_ring: Receive I/O context ring. + * @list: Node in srpt_nexus.ch_list. + * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This + * list contains struct srpt_ioctx elements and is protected + * against concurrent modification by the cm_id spinlock. + * @pkey: P_Key of the IB partition for this SRP channel. + * @sess: Session information associated with this SRP channel. + * @sess_name: Session name. + * @release_work: Allows scheduling of srpt_release_channel(). + */ +struct srpt_rdma_ch { + struct srpt_nexus *nexus; + struct ib_qp *qp; + union { + struct { + struct ib_cm_id *cm_id; + } ib_cm; + struct { + struct rdma_cm_id *cm_id; + } rdma_cm; + }; + struct ib_cq *cq; + struct ib_cqe zw_cqe; + struct rcu_head rcu; + struct kref kref; + int rq_size; + u32 max_rsp_size; + atomic_t sq_wr_avail; + struct srpt_port *sport; + int max_ti_iu_len; + atomic_t req_lim; + atomic_t req_lim_delta; + spinlock_t spinlock; + struct list_head free_list; + enum rdma_ch_state state; + struct srpt_send_ioctx **ioctx_ring; + struct srpt_recv_ioctx **ioctx_recv_ring; + struct list_head list; + struct list_head cmd_wait_list; + uint16_t pkey; + bool using_rdma_cm; + bool processing_wait_list; + struct se_session *sess; + u8 sess_name[40]; + struct work_struct release_work; +}; + +/** + * struct srpt_nexus - I_T nexus + * @rcu: RCU head for this data structure. + * @entry: srpt_port.nexus_list list node. 
+ * @ch_list: struct srpt_rdma_ch list. Protected by srpt_port.mutex. + * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ. + * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ. + */ +struct srpt_nexus { + struct rcu_head rcu; + struct list_head entry; + struct list_head ch_list; + u8 i_port_id[16]; + u8 t_port_id[16]; +}; + +/** + * struct srpt_port_attib - attributes for SRPT port + * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections. + * @srp_max_rsp_size: Maximum size of SRP response messages in bytes. + * @srp_sq_size: Shared receive queue (SRQ) size. + * @use_srq: Whether or not to use SRQ. + */ +struct srpt_port_attrib { + u32 srp_max_rdma_size; + u32 srp_max_rsp_size; + u32 srp_sq_size; + bool use_srq; +}; + +/** + * struct srpt_port - information associated by SRPT with a single IB port + * @sdev: backpointer to the HCA information. + * @mad_agent: per-port management datagram processing information. + * @enabled: Whether or not this target port is enabled. + * @port_guid: ASCII representation of Port GUID + * @port_gid: ASCII representation of Port GID + * @port: one-based port number. + * @sm_lid: cached value of the port's sm_lid. + * @lid: cached value of the port's lid. + * @gid: cached value of the port's gid. + * @port_acl_lock spinlock for port_acl_list: + * @work: work structure for refreshing the aforementioned cached values. + * @port_guid_tpg: TPG associated with target port GUID. + * @port_guid_wwn: WWN associated with target port GUID. + * @port_gid_tpg: TPG associated with target port GID. + * @port_gid_wwn: WWN associated with target port GID. + * @port_attrib: Port attributes that can be accessed through configfs. + * @ch_releaseQ: Enables waiting for removal from nexus_list. + * @mutex: Protects nexus_list. + * @nexus_list: Nexus list. See also srpt_nexus.entry. + */ +struct srpt_port { + struct srpt_device *sdev; + struct ib_mad_agent *mad_agent; + bool enabled; + u8 port_guid[24]; + u8 port_gid[64]; + u8 port; + u32 sm_lid; + u32 lid; + union ib_gid gid; + struct work_struct work; + struct se_portal_group port_guid_tpg; + struct se_wwn port_guid_wwn; + struct se_portal_group port_gid_tpg; + struct se_wwn port_gid_wwn; + struct srpt_port_attrib port_attrib; + wait_queue_head_t ch_releaseQ; + struct mutex mutex; + struct list_head nexus_list; +}; + +/** + * struct srpt_device - information associated by SRPT with a single HCA + * @device: Backpointer to the struct ib_device managed by the IB core. + * @pd: IB protection domain. + * @lkey: L_Key (local key) with write access to all local memory. + * @srq: Per-HCA SRQ (shared receive queue). + * @cm_id: Connection identifier. + * @srq_size: SRQ size. + * @sdev_mutex: Serializes use_srq changes. + * @use_srq: Whether or not to use SRQ. + * @ioctx_ring: Per-HCA SRQ. + * @event_handler: Per-HCA asynchronous IB event handler. + * @list: Node in srpt_dev_list. + * @port: Information about the ports owned by this HCA. + */ +struct srpt_device { + struct ib_device *device; + struct ib_pd *pd; + u32 lkey; + struct ib_srq *srq; + struct ib_cm_id *cm_id; + int srq_size; + struct mutex sdev_mutex; + bool use_srq; + struct srpt_recv_ioctx **ioctx_ring; + struct ib_event_handler event_handler; + struct list_head list; + struct srpt_port port[]; +}; + +#endif /* IB_SRPT_H */ |
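
The configfs ACL callback srpt_init_nodeacl() in ib_srpt.c above accepts three spellings of an initiator port ID: a GUID grouped as xxxx:xxxx:xxxx:xxxx, a 128-bit hexadecimal ID with an optional 0x prefix, or an IPv4/IPv6 address. The following minimal userspace sketch (not part of the patch) classifies the example IDs quoted in that comment; hexval(), parse_guid() and parse_i_port_id() are illustrative stand-ins for the kernel's hex2bin(), srpt_parse_guid() and srpt_parse_i_port_id(), and the way the 64-bit GUID value is assembled is a simplification of the kernel's get_unaligned_be64() use.

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Userspace stand-in for the hex digit handling done by hex2bin(). */
static int hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* GUID of the form xxxx:xxxx:xxxx:xxxx, cf. srpt_parse_guid(). */
static int parse_guid(uint64_t *guid, const char *name)
{
	unsigned int w[4];

	if (sscanf(name, "%4x:%4x:%4x:%4x", &w[0], &w[1], &w[2], &w[3]) != 4)
		return -1;
	*guid = ((uint64_t)w[0] << 48) | ((uint64_t)w[1] << 32) |
		((uint64_t)w[2] << 16) | w[3];
	return 0;
}

/* 128-bit hex ID with optional 0x prefix, cf. srpt_parse_i_port_id(). */
static int parse_i_port_id(uint8_t id[16], const char *name)
{
	const char *p = name;
	size_t len, count, i;

	if (strncasecmp(p, "0x", 2) == 0)
		p += 2;
	len = strlen(p);
	if (len % 2)
		return -1;
	count = len / 2 > 16 ? 16 : len / 2;
	memset(id, 0, 16);
	for (i = 0; i < count; i++) {
		int hi = hexval(p[2 * i]), lo = hexval(p[2 * i + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		/* Right-align the value; the leading bytes stay zero. */
		id[16 - count + i] = hi << 4 | lo;
	}
	return 0;
}

int main(void)
{
	const char *names[] = {
		"5054:00ff:fe4a:0b7b",
		"0x0000000000000000505400fffe4a0b7b",
		"192.168.122.76",
	};
	uint64_t guid;
	uint8_t id[16];
	struct in_addr a4;
	struct in6_addr a6;
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (parse_guid(&guid, names[i]) == 0)
			printf("%s: GUID 0x%016llx\n", names[i],
			       (unsigned long long)guid);
		else if (parse_i_port_id(id, names[i]) == 0)
			printf("%s: 128-bit initiator port ID\n", names[i]);
		else if (inet_pton(AF_INET, names[i], &a4) == 1 ||
			 inet_pton(AF_INET6, names[i], &a6) == 1)
			printf("%s: IP address\n", names[i]);
		else
			printf("%s: rejected\n", names[i]);
	}
	return 0;
}

As the driver code above shows, srpt_init_nodeacl() only validates the spelling of the ACL name; matching against logged-in initiators is left to the target core.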
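
The request/response sizing constants near the top of ib_srpt.h carry their arithmetic in inline comments (the /*48*/, /*20*/, /*16*/ and /*36*/ annotations). A quick userspace check of those numbers, taking the struct sizes from those comments rather than re-deriving them:

#include <stdio.h>

int main(void)
{
	/* Struct sizes as annotated in the ib_srpt.h comments above. */
	const unsigned int srp_cmd = 48;		/* sizeof(struct srp_cmd) */
	const unsigned int srp_indirect_buf = 20;	/* sizeof(struct srp_indirect_buf) */
	const unsigned int srp_direct_buf = 16;		/* sizeof(struct srp_direct_buf) */
	const unsigned int srp_rsp = 36;		/* sizeof(struct srp_rsp) */

	/* SRP_CMD IU plus an indirect table of 128 direct descriptors. */
	printf("DEFAULT_MAX_REQ_SIZE = %u\n",
	       srp_cmd + srp_indirect_buf + 128 * srp_direct_buf);	/* 2116 */

	/* A 256-byte response IU leaves this much room for sense data. */
	printf("sense data room = %u\n", 256 - srp_rsp);		/* 220 */

	return 0;
}

So DEFAULT_MAX_REQ_SIZE works out to 2116 bytes, and DEFAULT_MAX_RSP_SIZE = 256 leaves 220 bytes for sense data, matching the "leaves 220 bytes for sense data" note in the header.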
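
srpt_write_pending() and srpt_queue_response() both reserve send work-queue slots by decrementing ch->sq_wr_avail and roll the decrement back when the queue would go negative (the out_undo/out paths above). The sketch below is a small C11 userspace illustration of that reserve/rollback pattern only; sq_wr_avail, reserve_wqes() and release_wqes() are illustrative names, not part of the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sq_wr_avail = 8;	/* free send work-request slots */

/* Mirrors: if (atomic_sub_return(n, &ch->sq_wr_avail) < 0) { undo; fail } */
static bool reserve_wqes(int n)
{
	if (atomic_fetch_sub(&sq_wr_avail, n) - n < 0) {
		atomic_fetch_add(&sq_wr_avail, n);	/* roll back */
		return false;
	}
	return true;
}

/* Counterpart of the atomic_add() calls on the error paths above. */
static void release_wqes(int n)
{
	atomic_fetch_add(&sq_wr_avail, n);
}

int main(void)
{
	printf("reserve 5 -> %s\n", reserve_wqes(5) ? "ok" : "queue full");
	printf("reserve 5 -> %s\n", reserve_wqes(5) ? "ok" : "queue full");
	release_wqes(5);
	printf("reserve 5 -> %s\n", reserve_wqes(5) ? "ok" : "queue full");
	return 0;
}

With this scheme a temporarily full send queue only causes a transient -ENOMEM style back-off; the counter is restored either on the error paths shown above or once the posted work requests complete.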