Diffstat (limited to 'drivers/infiniband/ulp/ipoib')
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig              50
-rw-r--r--  drivers/infiniband/ulp/ipoib/Makefile             13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h             845
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         1663
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c     228
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c          251
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         1312
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       2686
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  1062
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c     200
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c       294
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c        294
12 files changed, 8898 insertions, 0 deletions
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
new file mode 100644
index 000000000..254e31a90
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_IPOIB
+ tristate "IP-over-InfiniBand"
+ depends on NETDEVICES && INET && (IPV6 || IPV6=n)
+ help
+ Support for the IP-over-InfiniBand protocol (IPoIB). This
+ transports IP packets over InfiniBand so you can use your IB
+ device as a fancy NIC.
+
+ See Documentation/infiniband/ipoib.rst for more information
+
+config INFINIBAND_IPOIB_CM
+ bool "IP-over-InfiniBand Connected Mode support"
+ depends on INFINIBAND_IPOIB
+ default n
+ help
+ This option enables support for IPoIB connected mode. After
+ enabling this option, you need to switch to connected mode
+ through /sys/class/net/ibXXX/mode to actually create
+ connections, and then increase the interface MTU with
+ e.g. ifconfig ib0 mtu 65520.
+
+ WARNING: Enabling connected mode will trigger some packet
+ drops for multicast and UD mode traffic from this interface,
+ unless you limit mtu for these destinations to 2044.
+
+config INFINIBAND_IPOIB_DEBUG
+ bool "IP-over-InfiniBand debugging" if EXPERT
+ depends on INFINIBAND_IPOIB
+ default y
+ help
+ This option causes debugging code to be compiled into the
+ IPoIB driver. The output can be turned on via the
+ debug_level and mcast_debug_level module parameters (which
+ can also be set after the driver is loaded through sysfs).
+
+ This option also creates a directory tree under ipoib/ in
+ debugfs, which contains files that expose debugging
+ information about IB multicast groups used by the IPoIB
+ driver.
+
+config INFINIBAND_IPOIB_DEBUG_DATA
+ bool "IP-over-InfiniBand data path debugging"
+ depends on INFINIBAND_IPOIB_DEBUG
+ help
+ This option compiles debugging code into the data path
+ of the IPoIB driver. The output can be turned on via the
+ data_debug_level module parameter; however, even with output
+ turned off, this debugging code will have some performance
+ impact.
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
new file mode 100644
index 000000000..6ece857ed
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
+
+ib_ipoib-y := ipoib_main.o \
+ ipoib_ib.o \
+ ipoib_multicast.o \
+ ipoib_verbs.o \
+ ipoib_vlan.o \
+ ipoib_ethtool.o \
+ ipoib_netlink.o
+ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o
+ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o
+
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
new file mode 100644
index 000000000..3440dc48d
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _IPOIB_H
+#define _IPOIB_H
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
+#include <linux/if_infiniband.h>
+#include <linux/mutex.h>
+
+#include <net/neighbour.h>
+#include <net/sch_generic.h>
+
+#include <linux/atomic.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_sa.h>
+#include <linux/sched.h>
+/* constants */
+
+enum ipoib_flush_level {
+ IPOIB_FLUSH_LIGHT,
+ IPOIB_FLUSH_NORMAL,
+ IPOIB_FLUSH_HEAVY
+};
+
+enum {
+ IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
+
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
+
+ IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
+ IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
+ IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
+ IPOIB_CM_RX_SG = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
+ IPOIB_RX_RING_SIZE = 256,
+ IPOIB_TX_RING_SIZE = 128,
+ IPOIB_MAX_QUEUE_SIZE = 8192,
+ IPOIB_MIN_QUEUE_SIZE = 2,
+ IPOIB_CM_MAX_CONN_QP = 4096,
+
+ IPOIB_NUM_WC = 4,
+
+ IPOIB_MAX_PATH_REC_QUEUE = 3,
+ IPOIB_MAX_MCAST_QUEUE = 64,
+
+ IPOIB_FLAG_OPER_UP = 0,
+ IPOIB_FLAG_INITIALIZED = 1,
+ IPOIB_FLAG_ADMIN_UP = 2,
+ IPOIB_PKEY_ASSIGNED = 3,
+ IPOIB_FLAG_SUBINTERFACE = 5,
+ IPOIB_STOP_REAPER = 7,
+ IPOIB_FLAG_ADMIN_CM = 9,
+ IPOIB_FLAG_UMCAST = 10,
+ IPOIB_NEIGH_TBL_FLUSH = 12,
+ IPOIB_FLAG_DEV_ADDR_SET = 13,
+ IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+
+ IPOIB_MAX_BACKOFF_SECONDS = 16,
+
+ IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
+ IPOIB_MCAST_FLAG_SENDONLY = 1,
+ /*
+ * For IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join is in flight and mcast->mc is unreliable.
+ * When clear and mcast->mc is IS_ERR_OR_NULL, the join needs to be
+ *   restarted or hasn't been started yet.
+ * When clear and mcast->mc is a valid pointer, the join was successful.
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
+ IPOIB_MCAST_FLAG_ATTACHED = 3,
+
+ MAX_SEND_CQE = 64,
+ IPOIB_CM_COPYBREAK = 256,
+
+ IPOIB_NON_CHILD = 0,
+ IPOIB_LEGACY_CHILD = 1,
+ IPOIB_RTNL_CHILD = 2,
+};
+
+#define IPOIB_OP_RECV (1ul << 31)
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+#define IPOIB_OP_CM (1ul << 30)
+#else
+#define IPOIB_OP_CM (0)
+#endif
+
+#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
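
The connected-mode code below tags each work request's wr_id with its ring index plus these op bits, so the completion handlers can classify each WC. A minimal sketch of the encode/decode, not part of the patch (the helper names are invented; the decode mirrors ipoib_cm_handle_rx_wc() and ipoib_cm_handle_tx_wc() in ipoib_cm.c):

static inline u64 example_cm_rx_wrid(unsigned int ring_index)
{
	/* Tag a CM receive WR with its slot in the RX ring. */
	return ring_index | IPOIB_OP_CM | IPOIB_OP_RECV;
}

static inline unsigned int example_wrid_to_ring_index(u64 wr_id)
{
	/* Strip the op bits to recover the ring slot, as the CM
	 * completion handlers do. */
	return wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
}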
+
+/* structs */
+
+struct ipoib_header {
+ __be16 proto;
+ u16 reserved;
+};
+
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
+};
+
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
+{
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the ipoib header is present now, make room for a dummy
+ * pseudo header and set skb field accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
+}
+
+static inline struct ipoib_dev_priv *ipoib_priv(const struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ return rn->clnt_priv;
+}
+
+/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
+struct ipoib_mcast {
+ struct ib_sa_mcmember_rec mcmember;
+ struct ib_sa_multicast *mc;
+ struct ipoib_ah *ah;
+
+ struct rb_node rb_node;
+ struct list_head list;
+
+ unsigned long created;
+ unsigned long backoff;
+ unsigned long delay_until;
+
+ unsigned long flags;
+ unsigned char logcount;
+
+ struct list_head neigh_list;
+
+ struct sk_buff_head pkt_queue;
+
+ struct net_device *dev;
+ struct completion done;
+};
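
Read together with the IPOIB_MCAST_FLAG_BUSY comment in the enum above, the flag and mcast->mc encode three join states. A minimal sketch, not part of the patch (the helper and enum names are invented; IS_ERR_OR_NULL comes from linux/err.h):

enum example_join_state { EX_JOIN_IN_FLIGHT, EX_JOIN_NOT_STARTED, EX_JOIN_DONE };

static enum example_join_state example_mcast_join_state(struct ipoib_mcast *mcast)
{
	if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		return EX_JOIN_IN_FLIGHT;	/* mcast->mc must not be trusted */
	if (IS_ERR_OR_NULL(mcast->mc))
		return EX_JOIN_NOT_STARTED;	/* join must be (re)started */
	return EX_JOIN_DONE;			/* mcast->mc is a valid join handle */
}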
+
+struct ipoib_rx_buf {
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_UD_RX_SG];
+};
+
+struct ipoib_tx_buf {
+ struct sk_buff *skb;
+ u64 mapping[MAX_SKB_FRAGS + 1];
+};
+
+struct ib_cm_id;
+
+struct ipoib_cm_data {
+ __be32 qpn; /* High byte MUST be ignored on receive */
+ __be32 mtu;
+};
+
+/*
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State. The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ * drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ * to be empty or the number of Poll CQ operations has exceeded
+ * CQ capacity size;
+ * - or
+ * post another WR that completes on the same CQ and wait for this
+ * WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the second option and wait for a completion on the
+ * same CQ before destroying QPs attached to our SRQ.
+ */
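
The driver follows the second option: ipoib_cm_start_rx_drain() and ipoib_cm_handle_rx_wc() in ipoib_cm.c below implement it across the rx_flush/rx_drain/rx_reap lists. A condensed single-QP sketch of the sequence, not part of the patch (return values are ignored; IPOIB_CM_RX_DRAIN_WRID is defined in ipoib_cm.c):

static void example_start_srq_qp_teardown(struct ib_qp *qp)
{
	struct ib_qp_attr err_attr = { .qp_state = IB_QPS_ERR };
	struct ib_send_wr drain_wr = {
		.opcode = IB_WR_SEND,
		.wr_id  = IPOIB_CM_RX_DRAIN_WRID,
	};

	/* Step 1: put the QP in the Error state. */
	ib_modify_qp(qp, &err_attr, IB_QP_STATE);

	/*
	 * Step 2 happens asynchronously: the Last WQE Reached event is
	 * delivered to the QP's event handler (ipoib_cm_rx_event_handler()).
	 *
	 * Step 3: post one more send WR that completes on the same CQ.
	 */
	ib_post_send(qp, &drain_wr, NULL);

	/*
	 * Step 4: the QP is destroyed only after this WR's completion
	 * (wr_id == IPOIB_CM_RX_DRAIN_WRID) is seen in the RX CQ; see
	 * ipoib_cm_handle_rx_wc() and ipoib_cm_free_rx_reap_list().
	 */
}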
+
+enum ipoib_cm_state {
+ IPOIB_CM_RX_LIVE,
+ IPOIB_CM_RX_ERROR, /* Ignored by stale task */
+ IPOIB_CM_RX_FLUSH /* Last WQE Reached event observed */
+};
+
+struct ipoib_cm_rx {
+ struct ib_cm_id *id;
+ struct ib_qp *qp;
+ struct ipoib_cm_rx_buf *rx_ring;
+ struct list_head list;
+ struct net_device *dev;
+ unsigned long jiffies;
+ enum ipoib_cm_state state;
+ int recv_count;
+};
+
+struct ipoib_cm_tx {
+ struct ib_cm_id *id;
+ struct ib_qp *qp;
+ struct list_head list;
+ struct net_device *dev;
+ struct ipoib_neigh *neigh;
+ struct ipoib_tx_buf *tx_ring;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ unsigned long flags;
+ u32 mtu;
+ unsigned int max_send_sge;
+};
+
+struct ipoib_cm_rx_buf {
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_CM_RX_SG];
+};
+
+struct ipoib_cm_dev_priv {
+ struct ib_srq *srq;
+ struct ipoib_cm_rx_buf *srq_ring;
+ struct ib_cm_id *id;
+ struct list_head passive_ids; /* state: LIVE */
+ struct list_head rx_error_list; /* state: ERROR */
+ struct list_head rx_flush_list; /* state: FLUSH, drain not started */
+ struct list_head rx_drain_list; /* state: FLUSH, drain started */
+ struct list_head rx_reap_list; /* state: FLUSH, drain done */
+ struct work_struct start_task;
+ struct work_struct reap_task;
+ struct work_struct skb_task;
+ struct work_struct rx_reap_task;
+ struct delayed_work stale_task;
+ struct sk_buff_head skb_queue;
+ struct list_head start_list;
+ struct list_head reap_list;
+ struct ib_wc ibwc[IPOIB_NUM_WC];
+ struct ib_sge rx_sge[IPOIB_CM_RX_SG];
+ struct ib_recv_wr rx_wr;
+ int nonsrq_conn_qp;
+ int max_cm_mtu;
+ int num_frags;
+};
+
+struct ipoib_ethtool_st {
+ u16 coalesce_usecs;
+ u16 max_coalesced_frames;
+};
+
+struct ipoib_neigh_table;
+
+struct ipoib_neigh_hash {
+ struct ipoib_neigh_table *ntbl;
+ struct ipoib_neigh __rcu **buckets;
+ struct rcu_head rcu;
+ u32 mask;
+ u32 size;
+};
+
+struct ipoib_neigh_table {
+ struct ipoib_neigh_hash __rcu *htbl;
+ atomic_t entries;
+ struct completion flushed;
+ struct completion deleted;
+};
+
+struct ipoib_qp_state_validate {
+ struct work_struct work;
+ struct ipoib_dev_priv *priv;
+};
+
+/*
+ * Device private locking: the network stack's tx_lock protects members
+ * used in the TX fast path; "lock" protects everything else. lock nests
+ * inside tx_lock (i.e. tx_lock must be acquired first if both are needed).
+ */
+struct ipoib_dev_priv {
+ spinlock_t lock;
+
+ struct net_device *dev;
+ void (*next_priv_destructor)(struct net_device *dev);
+
+ struct napi_struct send_napi;
+ struct napi_struct recv_napi;
+
+ unsigned long flags;
+
+ /*
+ * This protects access to the child_intfs list.
+ * To READ from child_intfs, the RTNL or the vlan_rwsem read side must
+ * be held. To WRITE, the RTNL and the vlan_rwsem write side must be
+ * held (in that order). This lock exists because we have a few contexts
+ * where we need child_intfs but do not want to grab the RTNL.
+ */
+ struct rw_semaphore vlan_rwsem;
+ struct mutex mcast_mutex;
+
+ struct rb_root path_tree;
+ struct list_head path_list;
+
+ struct ipoib_neigh_table ntbl;
+
+ struct ipoib_mcast *broadcast;
+ struct list_head multicast_list;
+ struct rb_root multicast_tree;
+
+ struct workqueue_struct *wq;
+ struct delayed_work mcast_task;
+ struct work_struct carrier_on_task;
+ struct work_struct flush_light;
+ struct work_struct flush_normal;
+ struct work_struct flush_heavy;
+ struct work_struct restart_task;
+ struct delayed_work ah_reap_task;
+ struct delayed_work neigh_reap_task;
+ struct ib_device *ca;
+ u8 port;
+ u16 pkey;
+ u16 pkey_index;
+ struct ib_pd *pd;
+ struct ib_cq *recv_cq;
+ struct ib_cq *send_cq;
+ struct ib_qp *qp;
+ u32 qkey;
+
+ union ib_gid local_gid;
+ u32 local_lid;
+
+ unsigned int admin_mtu;
+ unsigned int mcast_mtu;
+ unsigned int max_ib_mtu;
+
+ struct ipoib_rx_buf *rx_ring;
+
+ struct ipoib_tx_buf *tx_ring;
+ /* cyclic ring variables for managing tx_ring, for UD only */
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
+ struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
+ struct ib_ud_wr tx_wr;
+ struct ib_wc send_wc[MAX_SEND_CQE];
+
+ struct ib_recv_wr rx_wr;
+ struct ib_sge rx_sge[IPOIB_UD_RX_SG];
+
+ struct ib_wc ibwc[IPOIB_NUM_WC];
+
+ struct list_head dead_ahs;
+
+ struct ib_event_handler event_handler;
+
+ struct net_device *parent;
+ struct list_head child_intfs;
+ struct list_head list;
+ int child_type;
+
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+ struct ipoib_cm_dev_priv cm;
+#endif
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ struct list_head fs_list;
+ struct dentry *mcg_dentry;
+ struct dentry *path_dentry;
+#endif
+ u64 hca_caps;
+ struct ipoib_ethtool_st ethtool;
+ unsigned int max_send_sge;
+ bool sm_fullmember_sendonly_support;
+ const struct net_device_ops *rn_ops;
+};
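
A minimal sketch of the locking order required by the comment above struct ipoib_dev_priv; the same pattern appears in ipoib_cm_rep_handler() in ipoib_cm.c. The function name here is illustrative only:

static void example_lock_order(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_tx_lock_bh(dev);		/* outer: network stack tx_lock */
	spin_lock_irq(&priv->lock);	/* inner: priv->lock nests inside tx_lock */

	/* ... touch state covered by priv->lock ... */

	spin_unlock_irq(&priv->lock);
	netif_tx_unlock_bh(dev);
}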
+
+struct ipoib_ah {
+ struct net_device *dev;
+ struct ib_ah *ah;
+ struct list_head list;
+ struct kref ref;
+ unsigned int last_send;
+ int valid;
+};
+
+struct ipoib_path {
+ struct net_device *dev;
+ struct sa_path_rec pathrec;
+ struct ipoib_ah *ah;
+ struct sk_buff_head queue;
+
+ struct list_head neigh_list;
+
+ int query_id;
+ struct ib_sa_query *query;
+ struct completion done;
+
+ struct rb_node rb_node;
+ struct list_head list;
+};
+
+struct ipoib_neigh {
+ struct ipoib_ah *ah;
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+ struct ipoib_cm_tx *cm;
+#endif
+ u8 daddr[INFINIBAND_ALEN];
+ struct sk_buff_head queue;
+
+ struct net_device *dev;
+
+ struct list_head list;
+ struct ipoib_neigh __rcu *hnext;
+ struct rcu_head rcu;
+ atomic_t refcnt;
+ unsigned long alive;
+};
+
+#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
+
+void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
+static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
+{
+ if (atomic_dec_and_test(&neigh->refcnt))
+ ipoib_neigh_dtor(neigh);
+}
+struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
+struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
+ struct net_device *dev);
+void ipoib_neigh_free(struct ipoib_neigh *neigh);
+void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid);
+
+extern struct workqueue_struct *ipoib_workqueue;
+
+/* functions */
+
+int ipoib_rx_poll(struct napi_struct *napi, int budget);
+int ipoib_tx_poll(struct napi_struct *napi, int budget);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr);
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr);
+
+struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
+ struct ib_pd *pd, struct rdma_ah_attr *attr);
+void ipoib_free_ah(struct kref *kref);
+static inline void ipoib_put_ah(struct ipoib_ah *ah)
+{
+ kref_put(&ah->ref, ipoib_free_ah);
+}
+int ipoib_open(struct net_device *dev);
+void ipoib_intf_free(struct net_device *dev);
+int ipoib_add_pkey_attr(struct net_device *dev);
+int ipoib_add_umcast_attr(struct net_device *dev);
+
+int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn);
+void ipoib_reap_ah(struct work_struct *work);
+
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
+void ipoib_mark_paths_invalid(struct net_device *dev);
+void ipoib_flush_paths(struct net_device *dev);
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+ const char *format);
+int ipoib_intf_init(struct ib_device *hca, u8 port, const char *format,
+ struct net_device *dev);
+void ipoib_ib_tx_timer_func(struct timer_list *t);
+void ipoib_ib_dev_flush_light(struct work_struct *work);
+void ipoib_ib_dev_flush_normal(struct work_struct *work);
+void ipoib_ib_dev_flush_heavy(struct work_struct *work);
+void ipoib_pkey_event(struct work_struct *work);
+void ipoib_ib_dev_cleanup(struct net_device *dev);
+
+int ipoib_ib_dev_open_default(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev);
+void ipoib_ib_dev_stop(struct net_device *dev);
+void ipoib_ib_dev_up(struct net_device *dev);
+void ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop_default(struct net_device *dev);
+void ipoib_pkey_dev_check_presence(struct net_device *dev);
+
+void ipoib_mcast_join_task(struct work_struct *work);
+void ipoib_mcast_carrier_on_task(struct work_struct *work);
+void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
+
+void ipoib_mcast_restart_task(struct work_struct *work);
+void ipoib_mcast_start_thread(struct net_device *dev);
+void ipoib_mcast_stop_thread(struct net_device *dev);
+
+void ipoib_mcast_dev_down(struct net_device *dev);
+void ipoib_mcast_dev_flush(struct net_device *dev);
+
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req);
+
+struct rtnl_link_ops *ipoib_get_link_ops(void);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ int i, off;
+ struct sk_buff *skb = tx_req->skb;
+ skb_frag_t *frags = skb_shinfo(skb)->frags;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ u64 *mapping = tx_req->mapping;
+
+ if (skb_headlen(skb)) {
+ priv->tx_sge[0].addr = mapping[0];
+ priv->tx_sge[0].length = skb_headlen(skb);
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < nr_frags; ++i) {
+ priv->tx_sge[i + off].addr = mapping[i + off];
+ priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+ }
+ priv->tx_wr.wr.num_sge = nr_frags + off;
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
+int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
+void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
+ union ib_gid *gid,
+ unsigned long *created,
+ unsigned int *queuelen,
+ unsigned int *complete,
+ unsigned int *send_only);
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev);
+int ipoib_path_iter_next(struct ipoib_path_iter *iter);
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+ struct ipoib_path *path);
+#endif
+
+int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey);
+int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid);
+void ipoib_mcast_remove_list(struct list_head *remove_list);
+void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
+ struct list_head *remove_list);
+
+int ipoib_init_qp(struct net_device *dev);
+int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
+void ipoib_transport_dev_cleanup(struct net_device *dev);
+
+void ipoib_event(struct ib_event_handler *handler,
+ struct ib_event *record);
+
+int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
+int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
+
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+ u16 pkey, int child_type);
+
+int __init ipoib_netlink_init(void);
+void __exit ipoib_netlink_fini(void);
+
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
+int ipoib_set_mode(struct net_device *dev, const char *buf);
+
+void ipoib_setup_common(struct net_device *dev);
+
+void ipoib_pkey_open(struct ipoib_dev_priv *priv);
+void ipoib_drain_cq(struct net_device *dev);
+
+void ipoib_set_ethtool_ops(struct net_device *dev);
+
+#define IPOIB_FLAGS_RC 0x80
+#define IPOIB_FLAGS_UC 0x40
+
+/* We don't support UC connections at the moment */
+#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC))
+
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
+extern int ipoib_max_conn_qp;
+
+static inline int ipoib_cm_admin_enabled(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return IPOIB_CM_SUPPORTED(dev->dev_addr) &&
+ test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
+static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return IPOIB_CM_SUPPORTED(hwaddr) &&
+ test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
+static inline int ipoib_cm_up(struct ipoib_neigh *neigh)
+
+{
+ return test_bit(IPOIB_FLAG_OPER_UP, &neigh->cm->flags);
+}
+
+static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh)
+{
+ return neigh->cm;
+}
+
+static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx)
+{
+ neigh->cm = tx;
+}
+
+static inline int ipoib_cm_has_srq(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return !!priv->cm.srq;
+}
+
+static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ return priv->cm.max_cm_mtu;
+}
+
+void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx);
+int ipoib_cm_dev_open(struct net_device *dev);
+void ipoib_cm_dev_stop(struct net_device *dev);
+int ipoib_cm_dev_init(struct net_device *dev);
+int ipoib_cm_add_mode_attr(struct net_device *dev);
+void ipoib_cm_dev_cleanup(struct net_device *dev);
+struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ struct ipoib_neigh *neigh);
+void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ unsigned int mtu);
+void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
+void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
+#else
+
+struct ipoib_cm_tx;
+
+#define ipoib_max_conn_qp 0
+
+static inline int ipoib_cm_admin_enabled(struct net_device *dev)
+{
+ return 0;
+}
+static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
+
+{
+ return 0;
+}
+
+static inline int ipoib_cm_up(struct ipoib_neigh *neigh)
+
+{
+ return 0;
+}
+
+static inline struct ipoib_cm_tx *ipoib_cm_get(struct ipoib_neigh *neigh)
+{
+ return NULL;
+}
+
+static inline void ipoib_cm_set(struct ipoib_neigh *neigh, struct ipoib_cm_tx *tx)
+{
+}
+
+static inline int ipoib_cm_has_srq(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline unsigned int ipoib_cm_max_mtu(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline
+void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
+{
+ return;
+}
+
+static inline
+int ipoib_cm_dev_open(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline
+void ipoib_cm_dev_stop(struct net_device *dev)
+{
+ return;
+}
+
+static inline
+int ipoib_cm_dev_init(struct net_device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+void ipoib_cm_dev_cleanup(struct net_device *dev)
+{
+ return;
+}
+
+static inline
+struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ struct ipoib_neigh *neigh)
+{
+ return NULL;
+}
+
+static inline
+void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
+{
+ return;
+}
+
+static inline
+int ipoib_cm_add_mode_attr(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ unsigned int mtu)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static inline void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+}
+
+static inline void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+}
+#endif
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+void ipoib_create_debug_files(struct net_device *dev);
+void ipoib_delete_debug_files(struct net_device *dev);
+void ipoib_register_debugfs(void);
+void ipoib_unregister_debugfs(void);
+#else
+static inline void ipoib_create_debug_files(struct net_device *dev) { }
+static inline void ipoib_delete_debug_files(struct net_device *dev) { }
+static inline void ipoib_register_debugfs(void) { }
+static inline void ipoib_unregister_debugfs(void) { }
+#endif
+
+#define ipoib_printk(level, priv, format, arg...) \
+ printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
+#define ipoib_warn(priv, format, arg...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ 10 * HZ /*10 seconds */, \
+ 100); \
+ if (__ratelimit(&_rs)) \
+ ipoib_printk(KERN_WARNING, priv, format , ## arg);\
+} while (0)
+
+extern int ipoib_sendq_size;
+extern int ipoib_recvq_size;
+
+extern struct ib_sa_client ipoib_sa_client;
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+extern int ipoib_debug_level;
+
+#define ipoib_dbg(priv, format, arg...) \
+ do { \
+ if (ipoib_debug_level > 0) \
+ ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ } while (0)
+#define ipoib_dbg_mcast(priv, format, arg...) \
+ do { \
+ if (mcast_debug_level > 0) \
+ ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ } while (0)
+#else /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+#define ipoib_dbg(priv, format, arg...) \
+ do { (void) (priv); } while (0)
+#define ipoib_dbg_mcast(priv, format, arg...) \
+ do { (void) (priv); } while (0)
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+#define ipoib_dbg_data(priv, format, arg...) \
+ do { \
+ if (data_debug_level > 0) \
+ ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
+ } while (0)
+#else /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
+#define ipoib_dbg_data(priv, format, arg...) \
+ do { (void) (priv); } while (0)
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
+
+#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
+
+#endif /* _IPOIB_H */
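
For reference, the 20-byte IPoIB hardware address that IPOIB_QPN() and IPOIB_CM_SUPPORTED() operate on carries the destination UD QPN in the low 24 bits of bytes 0-3 (with the connected-mode flag bits in byte 0) and the destination port GID in bytes 4-19. A minimal parsing sketch, not part of the patch (the function name is invented; the GID offset matches how ipoib_main.c, outside this excerpt, reads it):

static void example_parse_ipoib_hwaddr(u8 ha[INFINIBAND_ALEN])
{
	union ib_gid gid;
	u32 qpn = IPOIB_QPN(ha);		 /* low 24 bits of bytes 0-3 */
	int cm_capable = IPOIB_CM_SUPPORTED(ha); /* IPOIB_FLAGS_RC in byte 0 */

	memcpy(&gid, ha + 4, sizeof(gid));	 /* bytes 4-19: port GID */
	pr_debug("qpn 0x%06x cm %d gid %pI6\n", qpn, cm_capable, gid.raw);
}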
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
new file mode 100644
index 000000000..8f0b598a4
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -0,0 +1,1663 @@
+/*
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_cm.h>
+#include <net/dst.h>
+#include <net/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+
+#include "ipoib.h"
+
+int ipoib_max_conn_qp = 128;
+
+module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
+MODULE_PARM_DESC(max_nonsrq_conn_qp,
+ "Max number of connected-mode QPs per interface "
+ "(applied only if shared receive queue is not available)");
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+static int data_debug_level;
+
+module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
+MODULE_PARM_DESC(cm_data_debug_level,
+ "Enable data path debug tracing for connected mode if > 0");
+#endif
+
+#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
+
+#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
+#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
+#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
+#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
+static struct ib_qp_attr ipoib_cm_err_attr = {
+ .qp_state = IB_QPS_ERR
+};
+
+#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
+
+static struct ib_send_wr ipoib_cm_rx_drain_wr = {
+ .opcode = IB_WR_SEND,
+};
+
+static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event);
+
+static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
+ u64 mapping[IPOIB_CM_RX_SG])
+{
+ int i;
+
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
+
+ for (i = 0; i < frags; ++i)
+ ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
+static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i, ret;
+
+ priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
+
+ for (i = 0; i < priv->cm.num_frags; ++i)
+ priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
+
+ ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
+ ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
+ priv->cm.srq_ring[id].mapping);
+ dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
+ priv->cm.srq_ring[id].skb = NULL;
+ }
+
+ return ret;
+}
+
+static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
+ struct ipoib_cm_rx *rx,
+ struct ib_recv_wr *wr,
+ struct ib_sge *sge, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i, ret;
+
+ wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
+
+ for (i = 0; i < IPOIB_CM_RX_SG; ++i)
+ sge[i].addr = rx->rx_ring[id].mapping[i];
+
+ ret = ib_post_recv(rx->qp, wr, NULL);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
+ ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+ rx->rx_ring[id].mapping);
+ dev_kfree_skb_any(rx->rx_ring[id].skb);
+ rx->rx_ring[id].skb = NULL;
+ }
+
+ return ret;
+}
+
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+ struct ipoib_cm_rx_buf *rx_ring,
+ int id, int frags,
+ u64 mapping[IPOIB_CM_RX_SG],
+ gfp_t gfp)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
+ * IP header to a multiple of 16.
+ */
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
+
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ for (i = 0; i < frags; i++) {
+ struct page *page = alloc_page(gfp);
+
+ if (!page)
+ goto partial_error;
+ skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+
+ mapping[i + 1] = ib_dma_map_page(priv->ca, page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
+ goto partial_error;
+ }
+
+ rx_ring[id].skb = skb;
+ return skb;
+
+partial_error:
+
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
+
+ for (; i > 0; --i)
+ ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static void ipoib_cm_free_rx_ring(struct net_device *dev,
+ struct ipoib_cm_rx_buf *rx_ring)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < ipoib_recvq_size; ++i)
+ if (rx_ring[i].skb) {
+ ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+ rx_ring[i].mapping);
+ dev_kfree_skb_any(rx_ring[i].skb);
+ }
+
+ vfree(rx_ring);
+}
+
+static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_cm_rx *p;
+
+ /* We only reserved 1 extra slot in CQ for drain WRs, so
+ * make sure we have at most 1 outstanding WR. */
+ if (list_empty(&priv->cm.rx_flush_list) ||
+ !list_empty(&priv->cm.rx_drain_list))
+ return;
+
+ /*
+ * QPs on the flush list are in the error state. This way, a "flush
+ * error" WC will be immediately generated for each WR we post.
+ */
+ p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
+ ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
+ if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
+ ipoib_warn(priv, "failed to post drain wr\n");
+
+ list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
+}
+
+static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
+{
+ struct ipoib_cm_rx *p = ctx;
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned long flags;
+
+ if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
+ return;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_move(&p->list, &priv->cm.rx_flush_list);
+ p->state = IPOIB_CM_RX_FLUSH;
+ ipoib_cm_start_rx_drain(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
+ struct ipoib_cm_rx *p)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_init_attr attr = {
+ .event_handler = ipoib_cm_rx_event_handler,
+ .send_cq = priv->recv_cq, /* For drain WR */
+ .recv_cq = priv->recv_cq,
+ .srq = priv->cm.srq,
+ .cap.max_send_wr = 1, /* For drain WR */
+ .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
+ .sq_sig_type = IB_SIGNAL_ALL_WR,
+ .qp_type = IB_QPT_RC,
+ .qp_context = p,
+ };
+
+ if (!ipoib_cm_has_srq(dev)) {
+ attr.cap.max_recv_wr = ipoib_recvq_size;
+ attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
+ }
+
+ return ib_create_qp(priv->pd, &attr);
+}
+
+static int ipoib_cm_modify_rx_qp(struct net_device *dev,
+ struct ib_cm_id *cm_id, struct ib_qp *qp,
+ unsigned int psn)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
+ return ret;
+ }
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
+ return ret;
+ }
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+ return ret;
+ }
+ qp_attr.rq_psn = psn;
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Current Mellanox HCA firmware won't generate completions
+ * with error for drain WRs unless the QP has been moved to
+ * RTS first. This work-around leaves a window where a QP has
+ * moved to error asynchronously, but this will eventually get
+ * fixed in firmware, so let's not error out if modify QP
+ * fails.
+ */
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ return 0;
+ }
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void ipoib_cm_init_rx_wr(struct net_device *dev,
+ struct ib_recv_wr *wr,
+ struct ib_sge *sge)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < priv->cm.num_frags; ++i)
+ sge[i].lkey = priv->pd->local_dma_lkey;
+
+ sge[0].length = IPOIB_CM_HEAD_SIZE;
+ for (i = 1; i < priv->cm.num_frags; ++i)
+ sge[i].length = PAGE_SIZE;
+
+ wr->next = NULL;
+ wr->sg_list = sge;
+ wr->num_sge = priv->cm.num_frags;
+}
+
+static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
+ struct ipoib_cm_rx *rx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct {
+ struct ib_recv_wr wr;
+ struct ib_sge sge[IPOIB_CM_RX_SG];
+ } *t;
+ int ret;
+ int i;
+
+ rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
+ sizeof(*rx->rx_ring)));
+ if (!rx->rx_ring)
+ return -ENOMEM;
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ ret = -ENOMEM;
+ goto err_free_1;
+ }
+
+ ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
+
+ spin_lock_irq(&priv->lock);
+
+ if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
+ spin_unlock_irq(&priv->lock);
+ ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
+ ret = -EINVAL;
+ goto err_free;
+ } else
+ ++priv->cm.nonsrq_conn_qp;
+
+ spin_unlock_irq(&priv->lock);
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
+ rx->rx_ring[i].mapping,
+ GFP_KERNEL)) {
+ ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+ ret = -ENOMEM;
+ goto err_count;
+ }
+ ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
+ "failed for buf %d\n", i);
+ ret = -EIO;
+ goto err_count;
+ }
+ }
+
+ rx->recv_count = ipoib_recvq_size;
+
+ kfree(t);
+
+ return 0;
+
+err_count:
+ spin_lock_irq(&priv->lock);
+ --priv->cm.nonsrq_conn_qp;
+ spin_unlock_irq(&priv->lock);
+
+err_free:
+ kfree(t);
+
+err_free_1:
+ ipoib_cm_free_rx_ring(dev, rx->rx_ring);
+
+ return ret;
+}
+
+static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
+ struct ib_qp *qp,
+ const struct ib_cm_req_event_param *req,
+ unsigned int psn)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_data data = {};
+ struct ib_cm_rep_param rep = {};
+
+ data.qpn = cpu_to_be32(priv->qp->qp_num);
+ data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
+
+ rep.private_data = &data;
+ rep.private_data_len = sizeof(data);
+ rep.flow_control = 0;
+ rep.rnr_retry_count = req->rnr_retry_count;
+ rep.srq = ipoib_cm_has_srq(dev);
+ rep.qp_num = qp->qp_num;
+ rep.starting_psn = psn;
+ return ib_send_cm_rep(cm_id, &rep);
+}
+
+static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct net_device *dev = cm_id->context;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx *p;
+ unsigned int psn;
+ int ret;
+
+ ipoib_dbg(priv, "REQ arrived\n");
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ p->dev = dev;
+ p->id = cm_id;
+ cm_id->context = p;
+ p->state = IPOIB_CM_RX_LIVE;
+ p->jiffies = jiffies;
+ INIT_LIST_HEAD(&p->list);
+
+ p->qp = ipoib_cm_create_rx_qp(dev, p);
+ if (IS_ERR(p->qp)) {
+ ret = PTR_ERR(p->qp);
+ goto err_qp;
+ }
+
+ psn = prandom_u32() & 0xffffff;
+ ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
+ if (ret)
+ goto err_modify;
+
+ if (!ipoib_cm_has_srq(dev)) {
+ ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
+ if (ret)
+ goto err_modify;
+ }
+
+ spin_lock_irq(&priv->lock);
+ queue_delayed_work(priv->wq,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+ /* Add this entry to the head of the passive_ids list, but do not re-add
+ * it if IB_EVENT_QP_LAST_WQE_REACHED has already moved it to the flush list. */
+ p->jiffies = jiffies;
+ if (p->state == IPOIB_CM_RX_LIVE)
+ list_move(&p->list, &priv->cm.passive_ids);
+ spin_unlock_irq(&priv->lock);
+
+ ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
+ if (ret) {
+ ipoib_warn(priv, "failed to send REP: %d\n", ret);
+ if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+ ipoib_warn(priv, "unable to move qp to error state\n");
+ }
+ return 0;
+
+err_modify:
+ ib_destroy_qp(p->qp);
+err_qp:
+ kfree(p);
+ return ret;
+}
+
+static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct ipoib_cm_rx *p;
+ struct ipoib_dev_priv *priv;
+
+ switch (event->event) {
+ case IB_CM_REQ_RECEIVED:
+ return ipoib_cm_req_handler(cm_id, event);
+ case IB_CM_DREQ_RECEIVED:
+ ib_send_cm_drep(cm_id, NULL, 0);
+ fallthrough;
+ case IB_CM_REJ_RECEIVED:
+ p = cm_id->context;
+ priv = ipoib_priv(p->dev);
+ if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+ ipoib_warn(priv, "unable to move qp to error state\n");
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+/* Adjust length of skb with fragments to match received data */
+static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ unsigned int length, struct sk_buff *toskb)
+{
+ int i, num_frags;
+ unsigned int size;
+
+ /* put header into skb */
+ size = min(length, hdr_space);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (length == 0) {
+ /* don't need this page */
+ skb_fill_page_desc(toskb, i, skb_frag_page(frag),
+ 0, PAGE_SIZE);
+ --skb_shinfo(skb)->nr_frags;
+ } else {
+ size = min_t(unsigned int, length, PAGE_SIZE);
+
+ skb_frag_size_set(frag, size);
+ skb->data_len += size;
+ skb->truesize += size;
+ skb->len += size;
+ length -= size;
+ }
+ }
+}
+
+void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx_buf *rx_ring;
+ unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
+ struct sk_buff *skb, *newskb;
+ struct ipoib_cm_rx *p;
+ unsigned long flags;
+ u64 mapping[IPOIB_CM_RX_SG];
+ int frags;
+ int has_srq;
+ struct sk_buff *small_skb;
+
+ ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_recvq_size)) {
+ if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
+ ipoib_cm_start_rx_drain(priv);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else
+ ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_recvq_size);
+ return;
+ }
+
+ p = wc->qp->qp_context;
+
+ has_srq = ipoib_cm_has_srq(dev);
+ rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
+
+ skb = rx_ring[wr_id].skb;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ ipoib_dbg(priv,
+ "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ ++dev->stats.rx_dropped;
+ if (has_srq)
+ goto repost;
+ else {
+ if (!--p->recv_count) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_move(&p->list, &priv->cm.rx_reap_list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
+ }
+ return;
+ }
+ }
+
+ if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
+ if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ p->jiffies = jiffies;
+ /* Move this entry to list head, but do not re-add it
+ * if it has been moved out of list. */
+ if (p->state == IPOIB_CM_RX_LIVE)
+ list_move(&p->list, &priv->cm.passive_ids);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ if (wc->byte_len < IPOIB_CM_COPYBREAK) {
+ int dlen = wc->byte_len;
+
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
+ if (small_skb) {
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
+ ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
+ dlen, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, small_skb->data, dlen);
+ ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
+ dlen, DMA_FROM_DEVICE);
+ skb_put(small_skb, dlen);
+ skb = small_skb;
+ goto copied;
+ }
+ }
+
+ frags = PAGE_ALIGN(wc->byte_len -
+ min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
+ PAGE_SIZE;
+
+ newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
+ mapping, GFP_ATOMIC);
+ if (unlikely(!newskb)) {
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
+
+ ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
+ memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
+
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
+
+ skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
+
+copied:
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+
+ skb->dev = dev;
+ /* XXX get correct PACKET_ type here */
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
+
+repost:
+ if (has_srq) {
+ if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
+ ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
+ "for buf %d\n", wr_id);
+ } else {
+ if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
+ &priv->cm.rx_wr,
+ priv->cm.rx_sge,
+ wr_id))) {
+ --p->recv_count;
+ ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
+ "for buf %d\n", wr_id);
+ }
+ }
+}
+
+static inline int post_send(struct ipoib_dev_priv *priv,
+ struct ipoib_cm_tx *tx,
+ unsigned int wr_id,
+ struct ipoib_tx_buf *tx_req)
+{
+ ipoib_build_sge(priv, tx_req);
+
+ priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
+
+ return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
+}
+
+void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_tx_buf *tx_req;
+ int rc;
+ unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
+
+ if (unlikely(skb->len > tx->mtu)) {
+ ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len, tx->mtu);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
+ return;
+ }
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
+ ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
+ tx->tx_head, skb->len, tx->qp->qp_num);
+
+ /*
+ * We put the skb into the tx_ring _before_ we call post_send()
+ * because it's entirely possible that the completion handler will
+ * run before we execute anything after the post_send(). That
+ * means we have to make sure everything is properly recorded and
+ * our state is consistent before we call post_send().
+ */
+ tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
+ tx_req->skb = skb;
+
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+ netif_stop_queue(dev);
+ }
+
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
+ ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
+ napi_schedule(&priv->send_napi);
+ }
+
+ rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
+ if (unlikely(rc)) {
+ ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
+ ++dev->stats.tx_errors;
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(skb);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } else {
+ netif_trans_update(dev);
+ ++tx->tx_head;
+ ++priv->global_tx_head;
+ }
+}
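
The ring accounting above relies on free-running head/tail counters: the queue is stopped once global_tx_head runs ipoib_sendq_size - 1 ahead of global_tx_tail, and ipoib_cm_handle_tx_wc() below wakes it once the gap drops to half the ring. A minimal sketch of the two checks as standalone helpers, not part of the patch (names invented; ipoib_sendq_size is assumed to be a power of two, as the masking above implies):

static inline int example_cm_tx_ring_full(struct ipoib_dev_priv *priv)
{
	/* head/tail only ever increase; their difference is the number of
	 * sends posted but not yet reaped by the TX completion handler. */
	return (priv->global_tx_head - priv->global_tx_tail) ==
	       ipoib_sendq_size - 1;
}

static inline unsigned int example_cm_tx_slot(unsigned int head)
{
	/* The low bits select the tx_ring slot. */
	return head & (ipoib_sendq_size - 1);
}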
+
+void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_tx *tx = wc->qp->qp_context;
+ unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
+ struct ipoib_tx_buf *tx_req;
+ unsigned long flags;
+
+ ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_sendq_size)) {
+ ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_sendq_size);
+ return;
+ }
+
+ tx_req = &tx->tx_ring[wr_id];
+
+ ipoib_dma_unmap_tx(priv, tx_req);
+
+ /* FIXME: is this right? Shouldn't we only increment on success? */
+ ++dev->stats.tx_packets;
+ dev->stats.tx_bytes += tx_req->skb->len;
+
+ dev_kfree_skb_any(tx_req->skb);
+
+ netif_tx_lock(dev);
+
+ ++tx->tx_tail;
+ ++priv->global_tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
+ netif_wake_queue(dev);
+
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct ipoib_neigh *neigh;
+
+ /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
+ * so don't make waves.
+ */
+ if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
+ wc->status == IB_WC_RETRY_EXC_ERR)
+ ipoib_dbg(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
+ else
+ ipoib_warn(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ neigh = tx->neigh;
+
+ if (neigh) {
+ neigh->cm = NULL;
+ ipoib_neigh_free(neigh);
+
+ tx->neigh = NULL;
+ }
+
+ if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ list_move(&tx->list, &priv->cm.reap_list);
+ queue_work(priv->wq, &priv->cm.reap_task);
+ }
+
+ clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ netif_tx_unlock(dev);
+}
+
+int ipoib_cm_dev_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
+ return 0;
+
+ priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
+ if (IS_ERR(priv->cm.id)) {
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
+ ret = PTR_ERR(priv->cm.id);
+ goto err_cm;
+ }
+
+ ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
+ 0);
+ if (ret) {
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ goto err_listen;
+ }
+
+ return 0;
+
+err_listen:
+ ib_destroy_cm_id(priv->cm.id);
+err_cm:
+ priv->cm.id = NULL;
+ return ret;
+}
+
+static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx *rx, *n;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&priv->lock);
+ list_splice_init(&priv->cm.rx_reap_list, &list);
+ spin_unlock_irq(&priv->lock);
+
+ list_for_each_entry_safe(rx, n, &list, list) {
+ ib_destroy_cm_id(rx->id);
+ ib_destroy_qp(rx->qp);
+ if (!ipoib_cm_has_srq(dev)) {
+ ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
+ spin_lock_irq(&priv->lock);
+ --priv->cm.nonsrq_conn_qp;
+ spin_unlock_irq(&priv->lock);
+ }
+ kfree(rx);
+ }
+}
+
+void ipoib_cm_dev_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_rx *p;
+ unsigned long begin;
+ int ret;
+
+ if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
+ return;
+
+ ib_destroy_cm_id(priv->cm.id);
+ priv->cm.id = NULL;
+
+ spin_lock_irq(&priv->lock);
+ while (!list_empty(&priv->cm.passive_ids)) {
+ p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
+ list_move(&p->list, &priv->cm.rx_error_list);
+ p->state = IPOIB_CM_RX_ERROR;
+ spin_unlock_irq(&priv->lock);
+ ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
+ if (ret)
+ ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
+ spin_lock_irq(&priv->lock);
+ }
+
+ /* Wait for all RX to be drained */
+ begin = jiffies;
+
+ while (!list_empty(&priv->cm.rx_error_list) ||
+ !list_empty(&priv->cm.rx_flush_list) ||
+ !list_empty(&priv->cm.rx_drain_list)) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv, "RX drain timing out\n");
+
+ /*
+ * assume the HW is wedged and just free up everything.
+ */
+ list_splice_init(&priv->cm.rx_flush_list,
+ &priv->cm.rx_reap_list);
+ list_splice_init(&priv->cm.rx_error_list,
+ &priv->cm.rx_reap_list);
+ list_splice_init(&priv->cm.rx_drain_list,
+ &priv->cm.rx_reap_list);
+ break;
+ }
+ spin_unlock_irq(&priv->lock);
+ usleep_range(1000, 2000);
+ ipoib_drain_cq(dev);
+ spin_lock_irq(&priv->lock);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ ipoib_cm_free_rx_reap_list(dev);
+
+ cancel_delayed_work(&priv->cm.stale_task);
+}
+
+static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct ipoib_cm_tx *p = cm_id->context;
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ struct ipoib_cm_data *data = event->private_data;
+ struct sk_buff_head skqueue;
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+ struct sk_buff *skb;
+
+ p->mtu = be32_to_cpu(data->mtu);
+
+ if (p->mtu <= IPOIB_ENCAP_LEN) {
+ ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+ p->mtu, IPOIB_ENCAP_LEN);
+ return -EINVAL;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
+ return ret;
+ }
+
+ qp_attr.rq_psn = 0 /* FIXME */;
+ ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
+ return ret;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ return ret;
+ }
+ ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ return ret;
+ }
+
+ skb_queue_head_init(&skqueue);
+
+ netif_tx_lock_bh(p->dev);
+ spin_lock_irq(&priv->lock);
+ set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
+ if (p->neigh)
+ while ((skb = __skb_dequeue(&p->neigh->queue)))
+ __skb_queue_tail(&skqueue, skb);
+ spin_unlock_irq(&priv->lock);
+ netif_tx_unlock_bh(p->dev);
+
+ while ((skb = __skb_dequeue(&skqueue))) {
+ skb->dev = p->dev;
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
+ }
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ if (ret) {
+ ipoib_warn(priv, "failed to send RTU: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_init_attr attr = {
+ .send_cq = priv->send_cq,
+ .recv_cq = priv->recv_cq,
+ .srq = priv->cm.srq,
+ .cap.max_send_wr = ipoib_sendq_size,
+ .cap.max_send_sge = 1,
+ .sq_sig_type = IB_SIGNAL_ALL_WR,
+ .qp_type = IB_QPT_RC,
+ .qp_context = tx,
+ .create_flags = 0
+ };
+ struct ib_qp *tx_qp;
+
+ if (dev->features & NETIF_F_SG)
+ attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1);
+
+ tx_qp = ib_create_qp(priv->pd, &attr);
+ tx->max_send_sge = attr.cap.max_send_sge;
+ return tx_qp;
+}
+
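+/*
+ * Build and send the CM REQ: our UD QPN and CM buffer size travel as
+ * private data, while the RC QP number and the IPoIB CM service ID
+ * (IPOIB_CM_IETF_ID | remote QPN) go in the REQ itself.
+ */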
+static int ipoib_cm_send_req(struct net_device *dev,
+ struct ib_cm_id *id, struct ib_qp *qp,
+ u32 qpn,
+ struct sa_path_rec *pathrec)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_data data = {};
+ struct ib_cm_req_param req = {};
+
+ data.qpn = cpu_to_be32(priv->qp->qp_num);
+ data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
+
+ req.primary_path = pathrec;
+ req.alternate_path = NULL;
+ req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
+ req.qp_num = qp->qp_num;
+ req.qp_type = qp->qp_type;
+ req.private_data = &data;
+ req.private_data_len = sizeof(data);
+ req.flow_control = 0;
+
+ req.starting_psn = 0; /* FIXME */
+
+ /*
+ * Pick some arbitrary defaults here; we could make these
+ * module parameters if anyone cared about setting them.
+ */
+ req.responder_resources = 4;
+ req.remote_cm_response_timeout = 20;
+ req.local_cm_response_timeout = 20;
+ req.retry_count = 0; /* RFC draft warns against retries */
+ req.rnr_retry_count = 0; /* RFC draft warns against retries */
+ req.max_cm_retries = 15;
+ req.srq = ipoib_cm_has_srq(dev);
+ return ib_send_cm_req(id, &req);
+}
+
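+/*
+ * Bring the connected-mode TX QP to the INIT state with this interface's
+ * P_Key index and port number before the CM REQ is sent.
+ */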
+static int ipoib_cm_modify_tx_init(struct net_device *dev,
+ struct ib_cm_id *cm_id, struct ib_qp *qp)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+ ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
+ if (ret) {
+ ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
+ return ret;
+ }
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ qp_attr.port_num = priv->port;
+ qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
+ struct sa_path_rec *pathrec)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned int noio_flag;
+ int ret;
+
+ noio_flag = memalloc_noio_save();
+ p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
+ if (!p->tx_ring) {
+ memalloc_noio_restore(noio_flag);
+ ret = -ENOMEM;
+ goto err_tx;
+ }
+
+ p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+ memalloc_noio_restore(noio_flag);
+ if (IS_ERR(p->qp)) {
+ ret = PTR_ERR(p->qp);
+ ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
+ goto err_qp;
+ }
+
+ p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
+ if (IS_ERR(p->id)) {
+ ret = PTR_ERR(p->id);
+ ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
+ goto err_id;
+ }
+
+ ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
+ goto err_modify_send;
+ }
+
+ ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
+ if (ret) {
+ ipoib_warn(priv, "failed to send cm req: %d\n", ret);
+ goto err_modify_send;
+ }
+
+ ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
+ p->qp->qp_num, pathrec->dgid.raw, qpn);
+
+ return 0;
+
+err_modify_send:
+ ib_destroy_cm_id(p->id);
+err_id:
+ p->id = NULL;
+ ib_destroy_qp(p->qp);
+err_qp:
+ p->qp = NULL;
+ vfree(p->tx_ring);
+err_tx:
+ return ret;
+}
+
+static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ struct ipoib_tx_buf *tx_req;
+ unsigned long begin;
+
+ ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
+ p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
+
+ if (p->id)
+ ib_destroy_cm_id(p->id);
+
+ if (p->tx_ring) {
+ /* Wait for all sends to complete */
+ begin = jiffies;
+ while ((int) p->tx_tail - (int) p->tx_head < 0) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv, "timing out; %d sends not completed\n",
+ p->tx_head - p->tx_tail);
+ goto timeout;
+ }
+
+ usleep_range(1000, 2000);
+ }
+ }
+
+timeout:
+
+ while ((int) p->tx_tail - (int) p->tx_head < 0) {
+ tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(tx_req->skb);
+ netif_tx_lock_bh(p->dev);
+ ++p->tx_tail;
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ netif_queue_stopped(p->dev) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ netif_wake_queue(p->dev);
+ netif_tx_unlock_bh(p->dev);
+ }
+
+ if (p->qp)
+ ib_destroy_qp(p->qp);
+
+ vfree(p->tx_ring);
+ kfree(p);
+}
+
+static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
+ const struct ib_cm_event *event)
+{
+ struct ipoib_cm_tx *tx = cm_id->context;
+ struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
+ struct net_device *dev = priv->dev;
+ struct ipoib_neigh *neigh;
+ unsigned long flags;
+ int ret;
+
+ switch (event->event) {
+ case IB_CM_DREQ_RECEIVED:
+ ipoib_dbg(priv, "DREQ received.\n");
+ ib_send_cm_drep(cm_id, NULL, 0);
+ break;
+ case IB_CM_REP_RECEIVED:
+ ipoib_dbg(priv, "REP received.\n");
+ ret = ipoib_cm_rep_handler(cm_id, event);
+ if (ret)
+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+ NULL, 0, NULL, 0);
+ break;
+ case IB_CM_REQ_ERROR:
+ case IB_CM_REJ_RECEIVED:
+ case IB_CM_TIMEWAIT_EXIT:
+ ipoib_dbg(priv, "CM error %d.\n", event->event);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ neigh = tx->neigh;
+
+ if (neigh) {
+ neigh->cm = NULL;
+ ipoib_neigh_free(neigh);
+
+ tx->neigh = NULL;
+ }
+
+ if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ list_move(&tx->list, &priv->cm.reap_list);
+ queue_work(priv->wq, &priv->cm.reap_task);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
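+/*
+ * Allocate a connected-mode TX context for a neighbour. This runs in
+ * atomic context (hence GFP_ATOMIC); the actual QP and CM setup is
+ * deferred to ipoib_cm_tx_start() on the driver workqueue.
+ */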
+struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
+ struct ipoib_neigh *neigh)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_cm_tx *tx;
+
+ tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
+ if (!tx)
+ return NULL;
+
+ neigh->cm = tx;
+ tx->neigh = neigh;
+ tx->dev = dev;
+ list_add(&tx->list, &priv->cm.start_list);
+ set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
+ queue_work(priv->wq, &priv->cm.start_task);
+ return tx;
+}
+
+void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
+ unsigned long flags;
+ if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ list_move(&tx->list, &priv->cm.reap_list);
+ queue_work(priv->wq, &priv->cm.reap_task);
+ ipoib_dbg(priv, "Reap connection for gid %pI6\n",
+ tx->neigh->daddr + 4);
+ tx->neigh = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+}
+
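+/*
+ * The 20-byte IPoIB hardware address starts with 4 bytes of flags + QPN,
+ * followed by the 16-byte port GID, so skipping this offset yields the GID.
+ */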
+#define QPN_AND_OPTIONS_OFFSET 4
+
+static void ipoib_cm_tx_start(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.start_task);
+ struct net_device *dev = priv->dev;
+ struct ipoib_neigh *neigh;
+ struct ipoib_cm_tx *p;
+ unsigned long flags;
+ struct ipoib_path *path;
+ int ret;
+
+ struct sa_path_rec pathrec;
+ u32 qpn;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while (!list_empty(&priv->cm.start_list)) {
+ p = list_entry(priv->cm.start_list.next, typeof(*p), list);
+ list_del_init(&p->list);
+ neigh = p->neigh;
+
+ qpn = IPOIB_QPN(neigh->daddr);
+		/*
+		 * As long as the lookup is done while holding these two locks,
+		 * the path's existence guarantees that it is still valid.
+		 */
+ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ if (!path) {
+			pr_info("%s: ignoring invalid path %pI6\n",
+				__func__,
+				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
+ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
+ ret = ipoib_cm_tx_init(p, qpn, &pathrec);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ret) {
+free_neigh:
+ neigh = p->neigh;
+ if (neigh) {
+ neigh->cm = NULL;
+ ipoib_neigh_free(neigh);
+ }
+ list_del(&p->list);
+ kfree(p);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
+static void ipoib_cm_tx_reap(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.reap_task);
+ struct net_device *dev = priv->dev;
+ struct ipoib_cm_tx *p;
+ unsigned long flags;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while (!list_empty(&priv->cm.reap_list)) {
+ p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
+ list_del_init(&p->list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ ipoib_cm_tx_destroy(p);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
+static void ipoib_cm_skb_reap(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.skb_task);
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+ unsigned int mtu = priv->mcast_mtu;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
+#endif
+ dev_kfree_skb_any(skb);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
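+/*
+ * Called for skbs that exceed the connected-mode path MTU: update the
+ * cached dst PMTU and queue the skb so that ipoib_cm_skb_reap() can send
+ * the ICMP "fragmentation needed" / ICMPv6 "packet too big" notification
+ * from workqueue context.
+ */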
+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
+ unsigned int mtu)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int e = skb_queue_empty(&priv->cm.skb_queue);
+
+ skb_dst_update_pmtu(skb, mtu);
+
+ skb_queue_tail(&priv->cm.skb_queue, skb);
+ if (e)
+ queue_work(priv->wq, &priv->cm.skb_task);
+}
+
+static void ipoib_cm_rx_reap(struct work_struct *work)
+{
+ ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
+ cm.rx_reap_task)->dev);
+}
+
+static void ipoib_cm_stale_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ cm.stale_task.work);
+ struct ipoib_cm_rx *p;
+ int ret;
+
+ spin_lock_irq(&priv->lock);
+ while (!list_empty(&priv->cm.passive_ids)) {
+		/* The list is sorted by LRU; start from the tail and stop
+		 * when we see a recently used entry.
+		 */
+ p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
+ if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
+ break;
+ list_move(&p->list, &priv->cm.rx_error_list);
+ p->state = IPOIB_CM_RX_ERROR;
+ spin_unlock_irq(&priv->lock);
+ ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
+ if (ret)
+ ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
+ spin_lock_irq(&priv->lock);
+ }
+
+ if (!list_empty(&priv->cm.passive_ids))
+ queue_delayed_work(priv->wq,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+ spin_unlock_irq(&priv->lock);
+}
+
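+/*
+ * sysfs "mode" attribute: reports whether the interface is in datagram or
+ * connected mode and lets the administrator switch between the two.
+ */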
+static ssize_t show_mode(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
+ return sprintf(buf, "connected\n");
+ else
+ return sprintf(buf, "datagram\n");
+}
+
+static ssize_t set_mode(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *dev = to_net_dev(d);
+ int ret;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ret = ipoib_set_mode(dev, buf);
+
+	/* ipoib_set_mode() is assumed to return with the rtnl lock still
+	 * held, unless it returned -EBUSY, in which case the lock was
+	 * already dropped and rtnl_unlock() must not be called.
+	 */
+ if (ret != -EBUSY)
+ rtnl_unlock();
+
+ return (!ret || ret == -EBUSY) ? count : ret;
+}
+
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+
+int ipoib_cm_add_mode_attr(struct net_device *dev)
+{
+ return device_create_file(&dev->dev, &dev_attr_mode);
+}
+
+static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_srq_init_attr srq_init_attr = {
+ .srq_type = IB_SRQT_BASIC,
+ .attr = {
+ .max_wr = ipoib_recvq_size,
+ .max_sge = max_sge
+ }
+ };
+
+ priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
+ if (IS_ERR(priv->cm.srq)) {
+ if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
+ priv->ca->name, PTR_ERR(priv->cm.srq));
+ priv->cm.srq = NULL;
+ return;
+ }
+
+ priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size,
+ sizeof(*priv->cm.srq_ring)));
+ if (!priv->cm.srq_ring) {
+ ib_destroy_srq(priv->cm.srq);
+ priv->cm.srq = NULL;
+ return;
+ }
+}
+
+int ipoib_cm_dev_init(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int max_srq_sge, i;
+
+ INIT_LIST_HEAD(&priv->cm.passive_ids);
+ INIT_LIST_HEAD(&priv->cm.reap_list);
+ INIT_LIST_HEAD(&priv->cm.start_list);
+ INIT_LIST_HEAD(&priv->cm.rx_error_list);
+ INIT_LIST_HEAD(&priv->cm.rx_flush_list);
+ INIT_LIST_HEAD(&priv->cm.rx_drain_list);
+ INIT_LIST_HEAD(&priv->cm.rx_reap_list);
+ INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
+ INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
+ INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
+ INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
+ INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
+
+ skb_queue_head_init(&priv->cm.skb_queue);
+
+ ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
+
+ max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
+ ipoib_cm_create_srq(dev, max_srq_sge);
+ if (ipoib_cm_has_srq(dev)) {
+ priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
+ priv->cm.num_frags = max_srq_sge;
+ ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
+ priv->cm.max_cm_mtu, priv->cm.num_frags);
+ } else {
+ priv->cm.max_cm_mtu = IPOIB_CM_MTU;
+ priv->cm.num_frags = IPOIB_CM_RX_SG;
+ }
+
+ ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
+
+ if (ipoib_cm_has_srq(dev)) {
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
+ priv->cm.num_frags - 1,
+ priv->cm.srq_ring[i].mapping,
+ GFP_KERNEL)) {
+ ipoib_warn(priv, "failed to allocate "
+ "receive buffer %d\n", i);
+ ipoib_cm_dev_cleanup(dev);
+ return -ENOMEM;
+ }
+
+ if (ipoib_cm_post_receive_srq(dev, i)) {
+ ipoib_warn(priv, "ipoib_cm_post_receive_srq "
+ "failed for buf %d\n", i);
+ ipoib_cm_dev_cleanup(dev);
+ return -EIO;
+ }
+ }
+ }
+
+ priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
+ return 0;
+}
+
+void ipoib_cm_dev_cleanup(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->cm.srq)
+ return;
+
+ ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
+
+ ib_destroy_srq(priv->cm.srq);
+ priv->cm.srq = NULL;
+ if (!priv->cm.srq_ring)
+ return;
+
+ ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
+ priv->cm.srq_ring = NULL;
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
new file mode 100644
index 000000000..67a21fdf5
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ipoib.h"
+
+struct ipoib_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define IPOIB_NETDEV_STAT(m) { \
+ .stat_string = #m, \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct ipoib_stats ipoib_gstrings_stats[] = {
+ IPOIB_NETDEV_STAT(rx_packets),
+ IPOIB_NETDEV_STAT(tx_packets),
+ IPOIB_NETDEV_STAT(rx_bytes),
+ IPOIB_NETDEV_STAT(tx_bytes),
+ IPOIB_NETDEV_STAT(tx_errors),
+ IPOIB_NETDEV_STAT(rx_dropped),
+ IPOIB_NETDEV_STAT(tx_dropped),
+ IPOIB_NETDEV_STAT(multicast),
+};
+
+#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
+
+static void ipoib_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
+
+ ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
+
+ strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
+ sizeof(drvinfo->bus_info));
+
+ strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
+}
+
+static int ipoib_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+ coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+
+ return 0;
+}
+
+static int ipoib_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ /*
+ * These values are saved in the private data and returned
+ * when ipoib_get_coalesce() is called
+ */
+ if (coal->rx_coalesce_usecs > 0xffff ||
+ coal->rx_max_coalesced_frames > 0xffff)
+ return -EINVAL;
+
+ ret = rdma_set_cq_moderation(priv->recv_cq,
+ coal->rx_max_coalesced_frames,
+ coal->rx_coalesce_usecs);
+ if (ret && ret != -EOPNOTSUPP) {
+ ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
+ return ret;
+ }
+
+ priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
+ priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
+
+ return 0;
+}
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ int i;
+ struct net_device_stats *net_stats = &dev->stats;
+ u8 *p = (u8 *)net_stats;
+
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+ data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
+
+}
+static void ipoib_get_strings(struct net_device __always_unused *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ipoib_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ break;
+ }
+}
+static int ipoib_get_sset_count(struct net_device __always_unused *dev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IPOIB_GLOBAL_STATS_LEN;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/* Return lane speed in units of 1e6 bit/sec */
+static inline int ib_speed_enum_to_int(int speed)
+{
+ switch (speed) {
+ case IB_SPEED_SDR:
+ return SPEED_2500;
+ case IB_SPEED_DDR:
+ return SPEED_5000;
+ case IB_SPEED_QDR:
+ case IB_SPEED_FDR10:
+ return SPEED_10000;
+ case IB_SPEED_FDR:
+ return SPEED_14000;
+ case IB_SPEED_EDR:
+ return SPEED_25000;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+static int ipoib_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(netdev);
+ struct ib_port_attr attr;
+ int ret, speed, width;
+
+ if (!netif_carrier_ok(netdev)) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+
+ ret = ib_query_port(priv->ca, priv->port, &attr);
+ if (ret < 0)
+ return -EINVAL;
+
+ speed = ib_speed_enum_to_int(attr.active_speed);
+ width = ib_width_enum_to_int(attr.active_width);
+
+ if (speed < 0 || width < 0)
+ return -EINVAL;
+
+	/* Aside from the members set below, the other members of
+	 * struct ethtool_link_settings are zero-initialized by
+	 * __ethtool_get_link_ksettings().
+	 */
+ cmd->base.speed = speed * width;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ cmd->base.phy_address = 0xFF;
+
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
+static const struct ethtool_ops ipoib_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .get_link_ksettings = ipoib_get_link_ksettings,
+ .get_drvinfo = ipoib_get_drvinfo,
+ .get_coalesce = ipoib_get_coalesce,
+ .set_coalesce = ipoib_set_coalesce,
+ .get_strings = ipoib_get_strings,
+ .get_ethtool_stats = ipoib_get_ethtool_stats,
+ .get_sset_count = ipoib_get_sset_count,
+ .get_link = ethtool_op_get_link,
+};
+
+void ipoib_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &ipoib_ethtool_ops;
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
new file mode 100644
index 000000000..12ba7a0fe
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct file_operations;
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ipoib.h"
+
+static struct dentry *ipoib_root;
+
+static void format_gid(union ib_gid *gid, char *buf)
+{
+ int i, n;
+
+ for (n = 0, i = 0; i < 8; ++i) {
+ n += sprintf(buf + n, "%x",
+ be16_to_cpu(((__be16 *) gid->raw)[i]));
+ if (i < 7)
+ buf[n++] = ':';
+ }
+}
+
+static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
+{
+ struct ipoib_mcast_iter *iter;
+ loff_t n = *pos;
+
+ iter = ipoib_mcast_iter_init(file->private);
+ if (!iter)
+ return NULL;
+
+ while (n--) {
+ if (ipoib_mcast_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ }
+
+ return iter;
+}
+
+static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr,
+ loff_t *pos)
+{
+ struct ipoib_mcast_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (ipoib_mcast_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void ipoib_mcg_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+ /* nothing for now */
+}
+
+static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
+{
+ struct ipoib_mcast_iter *iter = iter_ptr;
+ char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
+ union ib_gid mgid;
+ unsigned long created;
+ unsigned int queuelen, complete, send_only;
+
+ if (!iter)
+ return 0;
+
+ ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
+ &complete, &send_only);
+
+ format_gid(&mgid, gid_buf);
+
+ seq_printf(file,
+ "GID: %s\n"
+ " created: %10ld\n"
+ " queuelen: %9d\n"
+ " complete: %9s\n"
+ " send_only: %8s\n"
+ "\n",
+ gid_buf, created, queuelen,
+ complete ? "yes" : "no",
+ send_only ? "yes" : "no");
+
+ return 0;
+}
+
+static const struct seq_operations ipoib_mcg_sops = {
+ .start = ipoib_mcg_seq_start,
+ .next = ipoib_mcg_seq_next,
+ .stop = ipoib_mcg_seq_stop,
+ .show = ipoib_mcg_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(ipoib_mcg);
+
+static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
+{
+ struct ipoib_path_iter *iter;
+ loff_t n = *pos;
+
+ iter = ipoib_path_iter_init(file->private);
+ if (!iter)
+ return NULL;
+
+ while (n--) {
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ }
+
+ return iter;
+}
+
+static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr,
+ loff_t *pos)
+{
+ struct ipoib_path_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+ /* nothing for now */
+}
+
+static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
+{
+ struct ipoib_path_iter *iter = iter_ptr;
+ char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
+ struct ipoib_path path;
+ int rate;
+
+ if (!iter)
+ return 0;
+
+ ipoib_path_iter_read(iter, &path);
+
+ format_gid(&path.pathrec.dgid, gid_buf);
+
+ seq_printf(file,
+ "GID: %s\n"
+ " complete: %6s\n",
+ gid_buf, sa_path_get_dlid(&path.pathrec) ? "yes" : "no");
+
+ if (sa_path_get_dlid(&path.pathrec)) {
+ rate = ib_rate_to_mbps(path.pathrec.rate);
+
+ seq_printf(file,
+ " DLID: 0x%04x\n"
+ " SL: %12d\n"
+ " rate: %8d.%d Gb/sec\n",
+ be32_to_cpu(sa_path_get_dlid(&path.pathrec)),
+ path.pathrec.sl,
+ rate / 1000, rate % 1000);
+ }
+
+ seq_putc(file, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations ipoib_path_sops = {
+ .start = ipoib_path_seq_start,
+ .next = ipoib_path_seq_next,
+ .stop = ipoib_path_seq_stop,
+ .show = ipoib_path_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(ipoib_path);
+
+void ipoib_create_debug_files(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ char name[IFNAMSIZ + sizeof("_path")];
+
+ snprintf(name, sizeof(name), "%s_mcg", dev->name);
+ priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ ipoib_root, dev, &ipoib_mcg_fops);
+
+ snprintf(name, sizeof(name), "%s_path", dev->name);
+ priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ ipoib_root, dev, &ipoib_path_fops);
+}
+
+void ipoib_delete_debug_files(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ debugfs_remove(priv->mcg_dentry);
+ debugfs_remove(priv->path_dentry);
+ priv->mcg_dentry = priv->path_dentry = NULL;
+}
+
+void ipoib_register_debugfs(void)
+{
+ ipoib_root = debugfs_create_dir("ipoib", NULL);
+}
+
+void ipoib_unregister_debugfs(void)
+{
+ debugfs_remove(ipoib_root);
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
new file mode 100644
index 000000000..494f413dc
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -0,0 +1,1312 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <rdma/ib_cache.h>
+
+#include "ipoib.h"
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+static int data_debug_level;
+
+module_param(data_debug_level, int, 0644);
+MODULE_PARM_DESC(data_debug_level,
+ "Enable data path debug tracing if > 0");
+#endif
+
+struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
+ struct ib_pd *pd, struct rdma_ah_attr *attr)
+{
+ struct ipoib_ah *ah;
+ struct ib_ah *vah;
+
+ ah = kmalloc(sizeof(*ah), GFP_KERNEL);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->dev = dev;
+ ah->last_send = 0;
+ kref_init(&ah->ref);
+
+ vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
+ if (IS_ERR(vah)) {
+ kfree(ah);
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
+ ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
+ }
+
+ return ah;
+}
+
+void ipoib_free_ah(struct kref *kref)
+{
+ struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
+ struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add_tail(&ah->list, &priv->dead_ahs);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
+ u64 mapping[IPOIB_UD_RX_SG])
+{
+ ib_dma_unmap_single(priv->ca, mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
+}
+
+static int ipoib_ib_post_receive(struct net_device *dev, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+ priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+ priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
+
+
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
+ dev_kfree_skb_any(priv->rx_ring[id].skb);
+ priv->rx_ring[id].skb = NULL;
+ }
+
+ return ret;
+}
+
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sk_buff *skb;
+ int buf_size;
+ u64 *mapping;
+
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
+	/*
+	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
+	 * 64-byte aligned.
+	 */
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
+
+ mapping = priv->rx_ring[id].mapping;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
+ goto error;
+
+ priv->rx_ring[id].skb = skb;
+ return skb;
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static int ipoib_ib_post_receives(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ if (!ipoib_alloc_rx_skb(dev, i)) {
+ ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+ return -ENOMEM;
+ }
+ if (ipoib_ib_post_receive(dev, i)) {
+ ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_UD_RX_SG];
+ union ib_gid *dgid;
+ union ib_gid *sgid;
+
+ ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_recvq_size)) {
+ ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_recvq_size);
+ return;
+ }
+
+ skb = priv->rx_ring[wr_id].skb;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ ipoib_warn(priv,
+ "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+ dev_kfree_skb_any(skb);
+ priv->rx_ring[wr_id].skb = NULL;
+ return;
+ }
+
+ memcpy(mapping, priv->rx_ring[wr_id].mapping,
+ IPOIB_UD_RX_SG * sizeof(*mapping));
+
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
+
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
+
+ ipoib_ud_dma_unmap_rx(priv, mapping);
+
+ skb_put(skb, wc->byte_len);
+
+ /* First byte of dgid signals multicast when 0xff */
+ dgid = &((struct ib_grh *)skb->data)->dgid;
+
+ if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
+ skb->pkt_type = PACKET_HOST;
+ else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+
+ sgid = &((struct ib_grh *)skb->data)->sgid;
+
+	/*
+	 * Drop packets that this interface sent, i.e. multicast packets
+	 * that the HCA has replicated.
+	 */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
+ int need_repost = 1;
+
+ if ((wc->wc_flags & IB_WC_GRH) &&
+ sgid->global.interface_id != priv->local_gid.global.interface_id)
+ need_repost = 0;
+
+ if (need_repost) {
+ dev_kfree_skb_any(skb);
+ goto repost;
+ }
+ }
+
+ skb_pull(skb, IB_GRH_BYTES);
+
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
+
+ skb->dev = dev;
+ if ((dev->features & NETIF_F_RXCSUM) &&
+ likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&priv->recv_napi, skb);
+
+repost:
+ if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+ ipoib_warn(priv, "ipoib_ib_post_receive failed "
+ "for buf %d\n", wr_id);
+}
+
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
+{
+ struct sk_buff *skb = tx_req->skb;
+ u64 *mapping = tx_req->mapping;
+ int i;
+ int off;
+
+ if (skb_headlen(skb)) {
+ mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+ return -EIO;
+
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping[i + off] = ib_dma_map_page(ca,
+ skb_frag_page(frag),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
+ goto partial_error;
+ }
+ return 0;
+
+partial_error:
+ for (; i > 0; --i) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ if (off)
+ ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+ return -EIO;
+}
+
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ struct sk_buff *skb = tx_req->skb;
+ u64 *mapping = tx_req->mapping;
+ int i;
+ int off;
+
+ if (skb_headlen(skb)) {
+ ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+ DMA_TO_DEVICE);
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ib_dma_unmap_page(priv->ca, mapping[i + off],
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+}
+
+/*
+ * As a result of a completion error the QP can be transitioned to the SQE
+ * state. This function checks whether the (send) QP is in the SQE state
+ * and, if so, moves it back to RTS so that it is functional again.
+ */
+static void ipoib_qp_state_validate_work(struct work_struct *work)
+{
+ struct ipoib_qp_state_validate *qp_work =
+ container_of(work, struct ipoib_qp_state_validate, work);
+
+ struct ipoib_dev_priv *priv = qp_work->priv;
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
+ __func__, ret);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x is in state: %d\n",
+ __func__, priv->qp->qp_num, qp_attr.qp_state);
+
+	/* currently only the SQE->RTS transition is supported */
+ if (qp_attr.qp_state == IB_QPS_SQE) {
+ qp_attr.qp_state = IB_QPS_RTS;
+
+ ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
+ if (ret) {
+ pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
+ ret, priv->qp->qp_num);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
+ __func__, priv->qp->qp_num);
+ } else {
+ pr_warn("QP (%d) will stay in state: %d\n",
+ priv->qp->qp_num, qp_attr.qp_state);
+ }
+
+free_res:
+ kfree(qp_work);
+}
+
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ unsigned int wr_id = wc->wr_id;
+ struct ipoib_tx_buf *tx_req;
+
+ ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
+ wr_id, wc->status);
+
+ if (unlikely(wr_id >= ipoib_sendq_size)) {
+ ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_sendq_size);
+ return;
+ }
+
+ tx_req = &priv->tx_ring[wr_id];
+
+ ipoib_dma_unmap_tx(priv, tx_req);
+
+ ++dev->stats.tx_packets;
+ dev->stats.tx_bytes += tx_req->skb->len;
+
+ dev_kfree_skb_any(tx_req->skb);
+
+ ++priv->tx_tail;
+ ++priv->global_tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
+ netif_wake_queue(dev);
+
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct ipoib_qp_state_validate *qp_work;
+ ipoib_warn(priv,
+ "failed send event (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
+ if (!qp_work)
+ return;
+
+ INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
+ qp_work->priv = priv;
+ queue_work(priv->wq, &qp_work->work);
+ }
+}
+
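+/*
+ * Poll up to MAX_SEND_CQE send completions and dispatch them to the CM or
+ * UD TX completion handlers; returns nonzero if the CQ may still hold more
+ * entries, so callers such as ipoib_drain_cq() loop until it returns zero.
+ */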
+static int poll_tx(struct ipoib_dev_priv *priv)
+{
+ int n, i;
+ struct ib_wc *wc;
+
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+ for (i = 0; i < n; ++i) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
+ else
+ ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+ }
+ return n == MAX_SEND_CQE;
+}
+
+int ipoib_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(napi, struct ipoib_dev_priv, recv_napi);
+ struct net_device *dev = priv->dev;
+ int done;
+ int t;
+ int n, i;
+
+ done = 0;
+
+poll_more:
+ while (done < budget) {
+ int max = (budget - done);
+
+ t = min(IPOIB_NUM_WC, max);
+ n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
+
+ for (i = 0; i < n; i++) {
+ struct ib_wc *wc = priv->ibwc + i;
+
+ if (wc->wr_id & IPOIB_OP_RECV) {
+ ++done;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_rx_wc(dev, wc);
+ else
+ ipoib_ib_handle_rx_wc(dev, wc);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
+ }
+
+ if (n != t)
+ break;
+ }
+
+ if (done < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->recv_cq,
+ IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_reschedule(napi))
+ goto poll_more;
+ }
+
+ return done;
+}
+
+int ipoib_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
+ send_napi);
+ struct net_device *dev = priv->dev;
+ int n, i;
+ struct ib_wc *wc;
+
+poll_more:
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+
+ for (i = 0; i < n; i++) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(dev, wc);
+ else
+ ipoib_ib_handle_tx_wc(dev, wc);
+ }
+
+ if (n < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_reschedule(napi))
+ goto poll_more;
+ }
+ return n < 0 ? 0 : n;
+}
+
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
+{
+ struct ipoib_dev_priv *priv = ctx_ptr;
+
+ napi_schedule(&priv->recv_napi);
+}
+
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
+{
+ struct ipoib_dev_priv *priv = ctx_ptr;
+
+ napi_schedule(&priv->send_napi);
+}
+
+static inline int post_send(struct ipoib_dev_priv *priv,
+ unsigned int wr_id,
+ struct ib_ah *address, u32 dqpn,
+ struct ipoib_tx_buf *tx_req,
+ void *head, int hlen)
+{
+ struct sk_buff *skb = tx_req->skb;
+
+ ipoib_build_sge(priv, tx_req);
+
+ priv->tx_wr.wr.wr_id = wr_id;
+ priv->tx_wr.remote_qpn = dqpn;
+ priv->tx_wr.ah = address;
+
+ if (head) {
+ priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
+ priv->tx_wr.header = head;
+ priv->tx_wr.hlen = hlen;
+ priv->tx_wr.wr.opcode = IB_WR_LSO;
+ } else
+ priv->tx_wr.wr.opcode = IB_WR_SEND;
+
+ return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
+}
+
+int ipoib_send(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_tx_buf *tx_req;
+ int hlen, rc;
+ void *phead;
+ unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
+
+ if (skb_is_gso(skb)) {
+ hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ phead = skb->data;
+ if (unlikely(!skb_pull(skb, hlen))) {
+ ipoib_warn(priv, "linear data too small\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ } else {
+ if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+ ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+ return -1;
+ }
+ phead = NULL;
+ hlen = 0;
+ }
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ /* Does skb_linearize return ok without reducing nr_frags? */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+ }
+
+ ipoib_dbg_data(priv,
+ "sending packet, length=%d address=%p dqpn=0x%06x\n",
+ skb->len, address, dqpn);
+
+ /*
+ * We put the skb into the tx_ring _before_ we call post_send()
+ * because it's entirely possible that the completion handler will
+ * run before we execute anything after the post_send(). That
+ * means we have to make sure everything is properly recorded and
+ * our state is consistent before we call post_send().
+ */
+ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
+ tx_req->skb = skb;
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
+ else
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+	/* tx_head is advanced only after the send succeeds; the global
+	 * head/tail counters are used here just to decide whether the
+	 * queue must be stopped.
+	 */
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
+ netif_stop_queue(dev);
+ }
+
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+
+ rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
+ address, dqpn, tx_req, phead, hlen);
+ if (unlikely(rc)) {
+ ipoib_warn(priv, "post_send failed, error %d\n", rc);
+ ++dev->stats.tx_errors;
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ rc = 0;
+ } else {
+ netif_trans_update(dev);
+
+ rc = priv->tx_head;
+ ++priv->tx_head;
+ ++priv->global_tx_head;
+ }
+ return rc;
+}
+
+static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_ah *ah, *tah;
+ unsigned long flags;
+
+ netif_tx_lock_bh(priv->dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
+ if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
+ list_del(&ah->list);
+ rdma_destroy_ah(ah->ah, 0);
+ kfree(ah);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(priv->dev);
+}
+
+void ipoib_reap_ah(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+
+ ipoib_reap_dead_ahs(priv);
+
+ if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
+}
+
+static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
+{
+ clear_bit(IPOIB_STOP_REAPER, &priv->flags);
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
+ round_jiffies_relative(HZ));
+}
+
+static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
+{
+ set_bit(IPOIB_STOP_REAPER, &priv->flags);
+ cancel_delayed_work(&priv->ah_reap_task);
+ /*
+ * After ipoib_stop_ah_reaper() we always go through
+ * ipoib_reap_dead_ahs() which ensures the work is really stopped and
+ * does a final flush out of the dead_ah's list
+ */
+}
+
+static int recvs_pending(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int pending = 0;
+ int i;
+
+ for (i = 0; i < ipoib_recvq_size; ++i)
+ if (priv->rx_ring[i].skb)
+ ++pending;
+
+ return pending;
+}
+
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+ struct ib_qp *qp,
+ enum ib_qp_state new_state)
+{
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+ return;
+ }
+	/* Print according to the new state and the previous state. */
+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+ else
+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+ new_state, qp_attr.qp_state);
+}
+
+static void ipoib_napi_enable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_enable(&priv->recv_napi);
+ napi_enable(&priv->send_napi);
+}
+
+static void ipoib_napi_disable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_disable(&priv->recv_napi);
+ napi_disable(&priv->send_napi);
+}
+
+int ipoib_ib_dev_stop_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr qp_attr;
+ unsigned long begin;
+ struct ipoib_tx_buf *tx_req;
+ int i;
+
+ if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_napi_disable(dev);
+
+ ipoib_cm_dev_stop(dev);
+
+	/*
+	 * Move our QP to the error state and then reinitialize it
+	 * once all work requests have completed or have been flushed.
+	 */
+ qp_attr.qp_state = IB_QPS_ERR;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
+
+ /* Wait for all sends and receives to complete */
+ begin = jiffies;
+
+ while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
+ if (time_after(jiffies, begin + 5 * HZ)) {
+ ipoib_warn(priv,
+ "timing out; %d sends %d receives not completed\n",
+ priv->tx_head - priv->tx_tail,
+ recvs_pending(dev));
+
+ /*
+ * assume the HW is wedged and just free up
+ * all our pending work requests.
+ */
+ while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
+ tx_req = &priv->tx_ring[priv->tx_tail &
+ (ipoib_sendq_size - 1)];
+ ipoib_dma_unmap_tx(priv, tx_req);
+ dev_kfree_skb_any(tx_req->skb);
+ ++priv->tx_tail;
+ ++priv->global_tx_tail;
+ }
+
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ struct ipoib_rx_buf *rx_req;
+
+ rx_req = &priv->rx_ring[i];
+ if (!rx_req->skb)
+ continue;
+ ipoib_ud_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
+ }
+
+ goto timeout;
+ }
+
+ ipoib_drain_cq(dev);
+
+ usleep_range(1000, 2000);
+ }
+
+ ipoib_dbg(priv, "All sends and receives done.\n");
+
+timeout:
+ qp_attr.qp_state = IB_QPS_RESET;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ ipoib_warn(priv, "Failed to modify QP to RESET state\n");
+
+ ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
+
+ return 0;
+}
+
+int ipoib_ib_dev_open_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+
+ ret = ipoib_init_qp(dev);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
+ return -1;
+ }
+
+ ret = ipoib_ib_post_receives(dev);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
+ goto out;
+ }
+
+ ret = ipoib_cm_dev_open(dev);
+ if (ret) {
+ ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
+ goto out;
+ }
+
+ if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_napi_enable(dev);
+
+ return 0;
+out:
+ return -1;
+}
+
+int ipoib_ib_dev_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_pkey_dev_check_presence(dev);
+
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+ ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
+ (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
+ return -1;
+ }
+
+ ipoib_start_ah_reaper(priv);
+ if (priv->rn_ops->ndo_open(dev)) {
+ pr_warn("%s: Failed to open dev\n", dev->name);
+ goto dev_stop;
+ }
+
+ set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
+ return 0;
+
+dev_stop:
+ ipoib_stop_ah_reaper(priv);
+ return -1;
+}
+
+void ipoib_ib_dev_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ priv->rn_ops->ndo_stop(dev);
+
+ clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ ipoib_stop_ah_reaper(priv);
+}
+
+void ipoib_pkey_dev_check_presence(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (!(priv->pkey & 0x7fff) ||
+ ib_find_pkey(priv->ca, priv->port, priv->pkey,
+ &priv->pkey_index)) {
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ } else {
+ if (rn->set_id)
+ rn->set_id(dev, priv->pkey_index);
+ set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ }
+}
+
+void ipoib_ib_dev_up(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_pkey_dev_check_presence(dev);
+
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+ ipoib_dbg(priv, "PKEY is not assigned.\n");
+ return;
+ }
+
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+
+ ipoib_mcast_start_thread(dev);
+}
+
+void ipoib_ib_dev_down(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "downing ib_dev\n");
+
+ clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ netif_carrier_off(dev);
+
+ ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_dev_flush(dev);
+
+ ipoib_flush_paths(dev);
+}
+
+void ipoib_drain_cq(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int i, n;
+
+ /*
+ * We call completion handling routines that expect to be
+ * called from the BH-disabled NAPI poll context, so disable
+ * BHs here too.
+ */
+ local_bh_disable();
+
+ do {
+ n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
+ for (i = 0; i < n; ++i) {
+ /*
+ * Convert any successful completions to flush
+ * errors to avoid passing packets up the
+ * stack after bringing the device down.
+ */
+ if (priv->ibwc[i].status == IB_WC_SUCCESS)
+ priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
+
+ if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
+ if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+ else
+ ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
+ }
+ } while (n == IPOIB_NUM_WC);
+
+ while (poll_tx(priv))
+ ; /* nothing */
+
+ local_bh_enable();
+}
+
+/*
+ * Take whatever value is at P_Key index 0 and update priv->pkey.
+ * Returns 0 if the P_Key value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+ int result;
+ u16 prev_pkey;
+
+ prev_pkey = priv->pkey;
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+ priv->port, result);
+ return result;
+ }
+
+ priv->pkey |= 0x8000;
+
+ if (prev_pkey != priv->pkey) {
+ ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+ prev_pkey, priv->pkey);
+ /*
+ * Update the pkey in the broadcast address, while making sure to set
+ * the full membership bit, so that we join the right broadcast group.
+ */
+ priv->dev->broadcast[8] = priv->pkey >> 8;
+ priv->dev->broadcast[9] = priv->pkey & 0xff;
+ return 0;
+ }
+
+ return 1;
+}
+/*
+ * returns 0 if pkey value was found in a different slot.
+ */
+static inline int update_child_pkey(struct ipoib_dev_priv *priv)
+{
+ u16 old_index = priv->pkey_index;
+
+ priv->pkey_index = 0;
+ ipoib_pkey_dev_check_presence(priv->dev);
+
+ if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+ (old_index == priv->pkey_index))
+ return 1;
+ return 0;
+}
+
+/*
+ * Returns true if the device address of the IPoIB interface has changed and
+ * the new address is valid (i.e. present in the GID table), false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
+{
+ union ib_gid search_gid;
+ union ib_gid gid0;
+ union ib_gid *netdev_gid;
+ int err;
+ u16 index;
+ u8 port;
+ bool ret = false;
+
+ netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
+ if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
+ return false;
+
+ netif_addr_lock_bh(priv->dev);
+
+ /* The subnet prefix may have changed, update it now so we won't have
+ * to do it later
+ */
+ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+ netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+ search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+ search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+ netif_addr_unlock_bh(priv->dev);
+
+ err = ib_find_gid(priv->ca, &search_gid, &port, &index);
+
+ netif_addr_lock_bh(priv->dev);
+
+ if (search_gid.global.interface_id !=
+ priv->local_gid.global.interface_id)
+ /* There was a change while we were looking up the gid, bail
+ * here and let the next work sort this out
+ */
+ goto out;
+
+ /* The next section of code needs some background:
+ * Per IB spec the port GUID can't change if the HCA is powered on.
+ * port GUID is the basis for GID at index 0 which is the basis for
+ * the default device address of an IPoIB interface.
+ *
+ * so it seems the flow should be:
+ * if user_changed_dev_addr && gid in gid tbl
+ * set bit dev_addr_set
+ * return true
+ * else
+ * return false
+ *
+ * The issue is that there are devices that don't follow the spec:
+ * they change the port GUID when the HCA is powered, so in order
+ * not to break userspace applications, we need to check whether the
+ * user wanted to control the device address. We assume that if the
+ * user sets the device address back to be based on GID index 0,
+ * they no longer wish to control it.
+ *
+ * If the user doesn't control the device address,
+ * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid() failed, it means
+ * the port GUID has changed and GID at index 0 has changed
+ * so we need to change priv->local_gid and priv->dev->dev_addr
+ * to reflect the new GID.
+ */
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ if (!err && port == priv->port) {
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+ if (index == 0)
+ clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+ &priv->flags);
+ else
+ set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+ ret = true;
+ } else {
+ ret = false;
+ }
+ } else {
+ if (!err && port == priv->port) {
+ ret = true;
+ } else {
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+ memcpy(&priv->local_gid, &gid0,
+ sizeof(priv->local_gid));
+ memcpy(priv->dev->dev_addr + 4, &gid0,
+ sizeof(priv->local_gid));
+ ret = true;
+ }
+ }
+ }
+
+out:
+ netif_addr_unlock_bh(priv->dev);
+
+ return ret;
+}
+
+static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level,
+ int nesting)
+{
+ struct ipoib_dev_priv *cpriv;
+ struct net_device *dev = priv->dev;
+ int result;
+
+ down_read_nested(&priv->vlan_rwsem, nesting);
+
+ /*
+ * Flush any child interfaces too -- they might be up even if
+ * the parent is down.
+ */
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
+
+ up_read(&priv->vlan_rwsem);
+
+ if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
+ level != IPOIB_FLUSH_HEAVY) {
+ /* Make sure the dev_addr is set even if not flushing */
+ if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
+ ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
+ return;
+ }
+
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+		/* Interface is down; update the pkey and leave. */
+ if (level == IPOIB_FLUSH_HEAVY) {
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ update_parent_pkey(priv);
+ else
+ update_child_pkey(priv);
+ } else if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
+ ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
+ return;
+ }
+
+ if (level == IPOIB_FLUSH_HEAVY) {
+		/* Child devices chase their origin pkey value, while non-child
+		 * (parent) devices should always take whatever is present in pkey index 0.
+ */
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ result = update_child_pkey(priv);
+ if (result) {
+ /* restart QP only if P_Key index is changed */
+ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
+ return;
+ }
+
+ } else {
+ result = update_parent_pkey(priv);
+ /* restart QP only if P_Key value changed */
+ if (result) {
+ ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+ return;
+ }
+ }
+ }
+
+ if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
+ ipoib_mark_paths_invalid(dev);
+		/* Mark IPoIB operation as down to prevent races between
+		 * the flush flow, which leaves the MCG, and on-the-fly joins
+		 * that can happen during that time. The mcast restart task
+		 * should deal with join requests we missed.
+ */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
+ ipoib_reap_dead_ahs(priv);
+ }
+
+ if (level >= IPOIB_FLUSH_NORMAL)
+ ipoib_ib_dev_down(dev);
+
+ if (level == IPOIB_FLUSH_HEAVY) {
+ if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ ipoib_ib_dev_stop(dev);
+
+ if (ipoib_ib_dev_open(dev))
+ return;
+
+ if (netif_queue_stopped(dev))
+ netif_start_queue(dev);
+ }
+
+ /*
+	 * The device could have been brought down between the start and when
+	 * we get here, so don't bring it back up if it's not configured up.
+ */
+ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+ if (level >= IPOIB_FLUSH_NORMAL)
+ ipoib_ib_dev_up(dev);
+ if (ipoib_dev_addr_changed_valid(priv))
+ ipoib_mcast_restart_task(&priv->restart_task);
+ }
+}
+
+void ipoib_ib_dev_flush_light(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, flush_light);
+
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
+}
+
+void ipoib_ib_dev_flush_normal(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, flush_normal);
+
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
+}
+
+void ipoib_ib_dev_flush_heavy(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, flush_heavy);
+
+ rtnl_lock();
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ rtnl_unlock();
+}
+
+void ipoib_ib_dev_cleanup(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "cleaning up ib_dev\n");
+ /*
+ * We must make sure there are no more (path) completions
+ * that may wish to touch priv fields that are no longer valid
+ */
+ ipoib_flush_paths(dev);
+
+ ipoib_mcast_stop_thread(dev);
+ ipoib_mcast_dev_flush(dev);
+
+ /*
+	 * Our AH references are not all freed until ipoib_mcast_dev_flush(),
+	 * ipoib_flush_paths(), and the neighbor garbage collection have been
+	 * stopped and reaped. That should all be done now, so make a final
+	 * AH flush.
+ */
+ ipoib_reap_dead_ahs(priv);
+
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+ priv->rn_ops->ndo_uninit(dev);
+
+ if (priv->pd) {
+ ib_dealloc_pd(priv->pd);
+ priv->pd = NULL;
+ }
+}
+
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
new file mode 100644
index 000000000..35322d23f
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -0,0 +1,2686 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ipoib.h"
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
+
+#include <linux/ip.h>
+#include <linux/in.h>
+
+#include <linux/jhash.h>
+#include <net/arp.h>
+#include <net/addrconf.h>
+#include <linux/inetdevice.h>
+#include <rdma/ib_cache.h>
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
+int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
+
+module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
+module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+int ipoib_debug_level;
+
+module_param_named(debug_level, ipoib_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+#endif
+
+struct ipoib_path_iter {
+ struct net_device *dev;
+ struct ipoib_path path;
+};
+
+static const u8 ipv4_bcast_addr[] = {
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+
+struct workqueue_struct *ipoib_workqueue;
+
+struct ib_sa_client ipoib_sa_client;
+
+static int ipoib_add_one(struct ib_device *device);
+static void ipoib_remove_one(struct ib_device *device, void *client_data);
+static void ipoib_neigh_reclaim(struct rcu_head *rp);
+static struct net_device *ipoib_get_net_dev_by_params(
+ struct ib_device *dev, u8 port, u16 pkey,
+ const union ib_gid *gid, const struct sockaddr *addr,
+ void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
+
+static struct ib_client ipoib_client = {
+ .name = "ipoib",
+ .add = ipoib_add_one,
+ .remove = ipoib_remove_one,
+ .get_net_dev_by_params = ipoib_get_net_dev_by_params,
+};
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_info *ni = ptr;
+ struct net_device *dev = ni->dev;
+
+ if (dev->netdev_ops->ndo_open != ipoib_open)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_CHANGENAME:
+ ipoib_delete_debug_files(dev);
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_UNREGISTER:
+ ipoib_delete_debug_files(dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
+int ipoib_open(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "bringing up interface\n");
+
+ netif_carrier_off(dev);
+
+ set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+ priv->sm_fullmember_sendonly_support = false;
+
+ if (ipoib_ib_dev_open(dev)) {
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
+ return 0;
+ goto err_disable;
+ }
+
+ ipoib_ib_dev_up(dev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ /* Bring up any child interfaces too */
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(cpriv, &priv->child_intfs, list) {
+ int flags;
+
+ flags = cpriv->dev->flags;
+ if (flags & IFF_UP)
+ continue;
+
+ dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
+ }
+ up_read(&priv->vlan_rwsem);
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_disable:
+ clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+ return -EINVAL;
+}
+
+static int ipoib_stop(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "stopping interface\n");
+
+ clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+ netif_stop_queue(dev);
+
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ /* Bring down any child interfaces too */
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(cpriv, &priv->child_intfs, list) {
+ int flags;
+
+ flags = cpriv->dev->flags;
+ if (!(flags & IFF_UP))
+ continue;
+
+ dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
+ }
+ up_read(&priv->vlan_rwsem);
+ }
+
+ return 0;
+}
+
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+ return features;
+}
+
+static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = 0;
+
+ /* dev->mtu > 2K ==> connected mode */
+ if (ipoib_cm_admin_enabled(dev)) {
+ if (new_mtu > ipoib_cm_max_mtu(dev))
+ return -EINVAL;
+
+ if (new_mtu > priv->mcast_mtu)
+ ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
+ priv->mcast_mtu);
+
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
+ new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
+ return -EINVAL;
+
+ priv->admin_mtu = new_mtu;
+
+ if (priv->mcast_mtu < priv->admin_mtu)
+ ipoib_dbg(priv, "MTU must be smaller than the underlying "
+ "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
+
+ new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
+
+ if (priv->rn_ops->ndo_change_mtu) {
+ bool carrier_status = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+
+ /* notify lower level on the real mtu */
+ ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+ if (carrier_status)
+ netif_carrier_on(dev);
+ } else {
+ dev->mtu = new_mtu;
+ }
+
+ return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->rn_ops->ndo_get_stats64)
+ priv->rn_ops->ndo_get_stats64(dev, stats);
+ else
+ netdev_stats_to_stats64(stats, &dev->stats);
+}
+
+/* Called with an RCU read lock taken */
+static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
+ struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+ struct in_device *in_dev;
+ struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
+ __be32 ret_addr;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ in_dev = in_dev_get(dev);
+ if (!in_dev)
+ return false;
+
+ ret_addr = inet_confirm_addr(net, in_dev, 0,
+ addr_in->sin_addr.s_addr,
+ RT_SCOPE_HOST);
+ in_dev_put(in_dev);
+ if (ret_addr)
+ return true;
+
+ break;
+ case AF_INET6:
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
+ return true;
+
+ break;
+ }
+ return false;
+}
+
+/**
+ * ipoib_get_master_net_dev - Find the master net_device on top of the given net_device.
+ * @dev: base IPoIB net_device
+ *
+ * Returns the master net_device with a reference held, or the same net_device
+ * if no master exists.
+ */
+static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
+{
+ struct net_device *master;
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master)
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (master)
+ return master;
+
+ dev_hold(dev);
+ return dev;
+}
+
+struct ipoib_walk_data {
+ const struct sockaddr *addr;
+ struct net_device *result;
+};
+
+static int ipoib_upper_walk(struct net_device *upper,
+ struct netdev_nested_priv *priv)
+{
+ struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
+ int ret = 0;
+
+ if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
+ dev_hold(upper);
+ data->result = upper;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * ipoib_get_net_dev_match_addr - Find a net_device matching the given address,
+ * which is an upper device of the given net_device.
+ * @addr: IP address to look for.
+ * @dev: base IPoIB net_device
+ *
+ * If found, returns the net_device with a reference held. Otherwise returns
+ * NULL.
+ */
+static struct net_device *ipoib_get_net_dev_match_addr(
+ const struct sockaddr *addr, struct net_device *dev)
+{
+ struct netdev_nested_priv priv;
+ struct ipoib_walk_data data = {
+ .addr = addr,
+ };
+
+ priv.data = (void *)&data;
+ rcu_read_lock();
+ if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
+ dev_hold(dev);
+ data.result = dev;
+ goto out;
+ }
+
+ netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
+out:
+ rcu_read_unlock();
+ return data.result;
+}
+
+/* Returns the number of IPoIB netdevs on top of a given IPoIB device matching a
+ * pkey_index and address, if one exists.
+ *
+ * @found_net_dev: contains a matching net_device if the return value >= 1,
+ * with a reference held. */
+static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
+ const union ib_gid *gid,
+ u16 pkey_index,
+ const struct sockaddr *addr,
+ int nesting,
+ struct net_device **found_net_dev)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *net_dev = NULL;
+ int matches = 0;
+
+ if (priv->pkey_index == pkey_index &&
+ (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
+ if (!addr) {
+ net_dev = ipoib_get_master_net_dev(priv->dev);
+ } else {
+ /* Verify the net_device matches the IP address, as
+ * IPoIB child devices currently share a GID. */
+ net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
+ }
+ if (net_dev) {
+ if (!*found_net_dev)
+ *found_net_dev = net_dev;
+ else
+ dev_put(net_dev);
+ ++matches;
+ }
+ }
+
+ /* Check child interfaces */
+ down_read_nested(&priv->vlan_rwsem, nesting);
+ list_for_each_entry(child_priv, &priv->child_intfs, list) {
+ matches += ipoib_match_gid_pkey_addr(child_priv, gid,
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
+ if (matches > 1)
+ break;
+ }
+ up_read(&priv->vlan_rwsem);
+
+ return matches;
+}
+
+/* Returns the number of matching net_devs found (between 0 and 2). Also
+ * returns the matching net_device in the @net_dev parameter, holding a
+ * reference to the net_device, if the number of matches >= 1. */
+static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
+ u16 pkey_index,
+ const union ib_gid *gid,
+ const struct sockaddr *addr,
+ struct net_device **net_dev)
+{
+ struct ipoib_dev_priv *priv;
+ int matches = 0;
+
+ *net_dev = NULL;
+
+ list_for_each_entry(priv, dev_list, list) {
+ if (priv->port != port)
+ continue;
+
+ matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
+ addr, 0, net_dev);
+ if (matches > 1)
+ break;
+ }
+
+ return matches;
+}
+
+static struct net_device *ipoib_get_net_dev_by_params(
+ struct ib_device *dev, u8 port, u16 pkey,
+ const union ib_gid *gid, const struct sockaddr *addr,
+ void *client_data)
+{
+ struct net_device *net_dev;
+ struct list_head *dev_list = client_data;
+ u16 pkey_index;
+ int matches;
+ int ret;
+
+ if (!rdma_protocol_ib(dev, port))
+ return NULL;
+
+ ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
+ if (ret)
+ return NULL;
+
+ /* See if we can find a unique device matching the L2 parameters */
+ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
+ gid, NULL, &net_dev);
+
+ switch (matches) {
+ case 0:
+ return NULL;
+ case 1:
+ return net_dev;
+ }
+
+ dev_put(net_dev);
+
+ /* Couldn't find a unique device with L2 parameters only. Use L3
+ * address to uniquely match the net device */
+ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
+ gid, addr, &net_dev);
+ switch (matches) {
+ case 0:
+ return NULL;
+ default:
+ dev_warn_ratelimited(&dev->dev,
+ "duplicate IP address detected\n");
+ fallthrough;
+ case 1:
+ return net_dev;
+ }
+}
+
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "connected\n")) ||
+ (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "datagram\n"))) {
+ return 0;
+ }
+
+ /* flush paths if we switch modes so that connections are restarted */
+ if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+ set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ ipoib_warn(priv, "enabling connected mode "
+ "will cause multicast packet drops\n");
+ netdev_update_features(dev);
+ dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
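+		/*
+		 * ipoib_flush_paths() sleeps, so drop RTNL around it and
+		 * retake it before returning; report -EBUSY if the lock
+		 * cannot be reacquired.
+		 */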
+ rtnl_unlock();
+ priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+ ipoib_flush_paths(dev);
+ return (!rtnl_trylock()) ? -EBUSY : 0;
+ }
+
+ if (!strcmp(buf, "datagram\n")) {
+ clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_update_features(dev);
+ dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
+ rtnl_unlock();
+ ipoib_flush_paths(dev);
+ return (!rtnl_trylock()) ? -EBUSY : 0;
+ }
+
+ return -EINVAL;
+}
+
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node *n = priv->path_tree.rb_node;
+ struct ipoib_path *path;
+ int ret;
+
+ while (n) {
+ path = rb_entry(n, struct ipoib_path, rb_node);
+
+ ret = memcmp(gid, path->pathrec.dgid.raw,
+ sizeof (union ib_gid));
+
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ return path;
+ }
+
+ return NULL;
+}
+
+static int __path_add(struct net_device *dev, struct ipoib_path *path)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node **n = &priv->path_tree.rb_node;
+ struct rb_node *pn = NULL;
+ struct ipoib_path *tpath;
+ int ret;
+
+ while (*n) {
+ pn = *n;
+ tpath = rb_entry(pn, struct ipoib_path, rb_node);
+
+ ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
+ sizeof (union ib_gid));
+ if (ret < 0)
+ n = &pn->rb_left;
+ else if (ret > 0)
+ n = &pn->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&path->rb_node, pn, n);
+ rb_insert_color(&path->rb_node, &priv->path_tree);
+
+ list_add_tail(&path->list, &priv->path_list);
+
+ return 0;
+}
+
+static void path_free(struct net_device *dev, struct ipoib_path *path)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&path->queue)))
+ dev_kfree_skb_irq(skb);
+
+ ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
+
+ /* remove all neigh connected to this path */
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
+ if (path->ah)
+ ipoib_put_ah(path->ah);
+
+ kfree(path);
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
+{
+ struct ipoib_path_iter *iter;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
+ iter->dev = dev;
+ memset(iter->path.pathrec.dgid.raw, 0, 16);
+
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+int ipoib_path_iter_next(struct ipoib_path_iter *iter)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
+ struct rb_node *n;
+ struct ipoib_path *path;
+ int ret = 1;
+
+ spin_lock_irq(&priv->lock);
+
+ n = rb_first(&priv->path_tree);
+
+ while (n) {
+ path = rb_entry(n, struct ipoib_path, rb_node);
+
+ if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
+ sizeof (union ib_gid)) < 0) {
+ iter->path = *path;
+ ret = 0;
+ break;
+ }
+
+ n = rb_next(n);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+ struct ipoib_path *path)
+{
+ *path = iter->path;
+}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+
+void ipoib_mark_paths_invalid(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path, *tp;
+
+ spin_lock_irq(&priv->lock);
+
+ list_for_each_entry_safe(path, tp, &priv->path_list, list) {
+ ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
+ path->pathrec.dgid.raw);
+ if (path->ah)
+ path->ah->valid = 0;
+ }
+
+ spin_unlock_irq(&priv->lock);
+}
+
+static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
+{
+ struct ipoib_pseudo_header *phdr;
+
+ phdr = skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+}
+
+void ipoib_flush_paths(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path, *tp;
+ LIST_HEAD(remove_list);
+ unsigned long flags;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_splice_init(&priv->path_list, &remove_list);
+
+ list_for_each_entry(path, &remove_list, list)
+ rb_erase(&path->rb_node, &priv->path_tree);
+
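+	/*
+	 * Drop the locks while waiting for each outstanding path query to
+	 * complete, so that path_rec_completion() can run, then free the
+	 * path and reacquire the locks.
+	 */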
+ list_for_each_entry_safe(path, tp, &remove_list, list) {
+ if (path->query)
+ ib_sa_cancel_query(path->query_id, path->query);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ wait_for_completion(&path->done);
+ path_free(dev, path);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+}
+
+static void path_rec_completion(int status,
+ struct sa_path_rec *pathrec,
+ void *path_ptr)
+{
+ struct ipoib_path *path = path_ptr;
+ struct net_device *dev = path->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_ah *ah = NULL;
+ struct ipoib_ah *old_ah = NULL;
+ struct ipoib_neigh *neigh, *tn;
+ struct sk_buff_head skqueue;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (!status)
+ ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
+ be32_to_cpu(sa_path_get_dlid(pathrec)),
+ pathrec->dgid.raw);
+ else
+ ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
+ status, path->pathrec.dgid.raw);
+
+ skb_queue_head_init(&skqueue);
+
+ if (!status) {
+ struct rdma_ah_attr av;
+
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av, NULL)) {
+ ah = ipoib_create_ah(dev, priv->pd, &av);
+ rdma_destroy_ah_attr(&av);
+ }
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!IS_ERR_OR_NULL(ah)) {
+ /*
+		 * pathrec.dgid is used as the database key from the LLADDR;
+		 * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
+ path->pathrec = *pathrec;
+
+ old_ah = path->ah;
+ path->ah = ah;
+
+ ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
+ ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
+ pathrec->sl);
+
+ while ((skb = __skb_dequeue(&path->queue)))
+ __skb_queue_tail(&skqueue, skb);
+
+ list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
+ if (neigh->ah) {
+ WARN_ON(neigh->ah != old_ah);
+ /*
+ * Dropping the ah reference inside
+ * priv->lock is safe here, because we
+ * will hold one more reference from
+ * the original value of path->ah (ie
+ * old_ah).
+ */
+ ipoib_put_ah(neigh->ah);
+ }
+ kref_get(&path->ah->ref);
+ neigh->ah = path->ah;
+
+ if (ipoib_cm_enabled(dev, neigh->daddr)) {
+ if (!ipoib_cm_get(neigh))
+ ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
+ path,
+ neigh));
+ if (!ipoib_cm_get(neigh)) {
+ ipoib_neigh_free(neigh);
+ continue;
+ }
+ }
+
+ while ((skb = __skb_dequeue(&neigh->queue)))
+ __skb_queue_tail(&skqueue, skb);
+ }
+ path->ah->valid = 1;
+ }
+
+ path->query = NULL;
+ complete(&path->done);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (IS_ERR_OR_NULL(ah))
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
+ if (old_ah)
+ ipoib_put_ah(old_ah);
+
+ while ((skb = __skb_dequeue(&skqueue))) {
+ int ret;
+ skb->dev = dev;
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
+ }
+}
+
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
+static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+
+ if (!priv->broadcast)
+ return NULL;
+
+ path = kzalloc(sizeof(*path), GFP_ATOMIC);
+ if (!path)
+ return NULL;
+
+ skb_queue_head_init(&path->queue);
+
+ INIT_LIST_HEAD(&path->neigh_list);
+
+ init_path_rec(priv, path, gid);
+
+ return path;
+}
+
+static int path_rec_start(struct net_device *dev,
+ struct ipoib_path *path)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "Start path record lookup for %pI6\n",
+ path->pathrec.dgid.raw);
+
+ init_completion(&path->done);
+
+ path->query_id =
+ ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
+ &path->pathrec,
+ IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_NUMB_PATH |
+ IB_SA_PATH_REC_TRAFFIC_CLASS |
+ IB_SA_PATH_REC_PKEY,
+ 1000, GFP_ATOMIC,
+ path_rec_completion,
+ path, &path->query);
+ if (path->query_id < 0) {
+ ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
+ path->query = NULL;
+ complete(&path->done);
+ return path->query_id;
+ }
+
+ return 0;
+}
+
+static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ path = __path_find(dev, daddr + 4);
+ if (!path)
+ goto out;
+ if (!path->query)
+ path_rec_start(dev, path);
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_path *path;
+ struct ipoib_neigh *neigh;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ neigh = ipoib_neigh_alloc(daddr, dev);
+ if (!neigh) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+	/* To avoid a race condition, make sure that the
+	 * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
+ }
+
+ path = __path_find(dev, daddr + 4);
+ if (!path) {
+ path = path_rec_create(dev, daddr + 4);
+ if (!path)
+ goto err_path;
+
+ __path_add(dev, path);
+ }
+
+ list_add_tail(&neigh->list, &path->neigh_list);
+
+ if (path->ah && path->ah->valid) {
+ kref_get(&path->ah->ref);
+ neigh->ah = path->ah;
+
+ if (ipoib_cm_enabled(dev, neigh->daddr)) {
+ if (!ipoib_cm_get(neigh))
+ ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
+ if (!ipoib_cm_get(neigh)) {
+ ipoib_neigh_free(neigh);
+ goto err_drop;
+ }
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, neigh->daddr);
+ __skb_queue_tail(&neigh->queue, skb);
+ } else {
+ ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+ skb_queue_len(&neigh->queue));
+ goto err_drop;
+ }
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(daddr));
+ ipoib_neigh_put(neigh);
+ return NULL;
+ }
+ } else {
+ neigh->ah = NULL;
+
+ if (!path->query && path_rec_start(dev, path))
+ goto err_path;
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, neigh->daddr);
+ __skb_queue_tail(&neigh->queue, skb);
+ } else {
+ goto err_drop;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_neigh_put(neigh);
+ return NULL;
+
+err_path:
+ ipoib_neigh_free(neigh);
+err_drop:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_neigh_put(neigh);
+
+ return NULL;
+}
+
+static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+ struct ipoib_pseudo_header *phdr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* no broadcast means that all paths are (going to be) not valid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+
+ path = __path_find(dev, phdr->hwaddr + 4);
+ if (!path || !path->ah || !path->ah->valid) {
+ if (!path) {
+ path = path_rec_create(dev, phdr->hwaddr + 4);
+ if (!path)
+ goto drop_and_unlock;
+ __path_add(dev, path);
+ } else {
+ /*
+ * make sure there are no changes in the existing
+ * path record
+ */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+ }
+ if (!path->query && path_rec_start(dev, path)) {
+ goto drop_and_unlock;
+ }
+
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ goto unlock;
+ } else {
+ goto drop_and_unlock;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_dbg(priv, "Send unicast ARP to %08x\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
+ return;
+
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_neigh *neigh;
+ struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+ unsigned long flags;
+
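+	/*
+	 * The pseudo header pushed by ipoib_hard_header() carries the
+	 * 20-byte destination hardware address; it is consumed here and
+	 * never sent on the wire.
+	 */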
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
+ header = (struct ipoib_header *) skb->data;
+
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
+ /* multicast, arrange "if" according to probability */
+ if ((header->proto != htons(ETH_P_IP)) &&
+ (header->proto != htons(ETH_P_IPV6)) &&
+ (header->proto != htons(ETH_P_ARP)) &&
+ (header->proto != htons(ETH_P_RARP)) &&
+ (header->proto != htons(ETH_P_TIPC))) {
+ /* ethertype not supported by IPoIB */
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+		/* Add in the P_Key for multicast */
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
+
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (likely(neigh))
+ goto send_using_neigh;
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* unicast, arrange "switch" according to probability */
+ switch (header->proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ case htons(ETH_P_TIPC):
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (unlikely(!neigh)) {
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
+ }
+ break;
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_RARP):
+ /* for unicast ARP and RARP should always perform path find */
+ unicast_arp_send(skb, dev, phdr);
+ return NETDEV_TX_OK;
+ default:
+ /* ethertype not supported by IPoIB */
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+send_using_neigh:
+ /* note we now hold a ref to neigh */
+ if (ipoib_cm_get(neigh)) {
+ if (ipoib_cm_up(neigh)) {
+ ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
+ goto unref;
+ }
+ } else if (neigh->ah && neigh->ah->valid) {
+ neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
+ goto unref;
+ } else if (neigh->ah) {
+ neigh_refresh_path(neigh, phdr->hwaddr, dev);
+ }
+
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ spin_lock_irqsave(&priv->lock, flags);
+ __skb_queue_tail(&neigh->queue, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
+
+unref:
+ ipoib_neigh_put(neigh);
+
+ return NETDEV_TX_OK;
+}
+
+static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
+ jiffies_to_msecs(jiffies - dev_trans_start(dev)));
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
+ /* XXX reset QP, etc. */
+}
+
+static int ipoib_hard_header(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ const void *daddr,
+ const void *saddr,
+ unsigned int len)
+{
+ struct ipoib_header *header;
+
+ header = skb_push(skb, sizeof(*header));
+
+ header->proto = htons(type);
+ header->reserved = 0;
+
+ /*
+	 * We don't rely on the dst_entry structure; always stuff the
+	 * destination address into the skb hard header so we can figure out
+	 * where to send the packet later.
+ */
+ push_pseudo_header(skb, daddr);
+
+ return IPOIB_HARD_LEN;
+}
+
+static void ipoib_set_mcast_list(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+ ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
+ return;
+ }
+
+ queue_work(priv->wq, &priv->restart_task);
+}
+
+static int ipoib_get_iflink(const struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ /* parent interface */
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return dev->ifindex;
+
+ /* child/vlan interface */
+ return priv->parent->ifindex;
+}
+
+static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
+{
+ /*
+	 * Use only the address parts that contribute to spreading.
+	 * The subnet prefix is not used, as one cannot connect to the
+	 * same remote port (GUID) using the same remote QPN via two
+ * different subnets.
+ */
+ /* qpn octets[1:4) & port GUID octets[12:20) */
+ u32 *d32 = (u32 *) daddr;
+ u32 hv;
+
+ hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
+ return hv & htbl->mask;
+}
+
+struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh *neigh = NULL;
+ u32 hash_val;
+
+ rcu_read_lock_bh();
+
+ htbl = rcu_dereference_bh(ntbl->htbl);
+
+ if (!htbl)
+ goto out_unlock;
+
+ hash_val = ipoib_addr_hash(htbl, daddr);
+ for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
+ neigh != NULL;
+ neigh = rcu_dereference_bh(neigh->hnext)) {
+ if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
+ /* found, take one ref on behalf of the caller */
+ if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ /* deleted */
+ neigh = NULL;
+ goto out_unlock;
+ }
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ rcu_read_unlock_bh();
+ return neigh;
+}
+
+static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long neigh_obsolete;
+ unsigned long dt;
+ unsigned long flags;
+ int i;
+ LIST_HEAD(remove_list);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+
+ if (!htbl)
+ goto out_unlock;
+
+ /* neigh is obsolete if it was idle for two GC periods */
+ dt = 2 * arp_tbl.gc_interval;
+ neigh_obsolete = jiffies - dt;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) != NULL) {
+			/* Was the neigh idle for two GC periods? */
+ if (time_after(neigh_obsolete, neigh->alive)) {
+
+ ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
+
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from path/mc list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ } else {
+ np = &neigh->hnext;
+ }
+
+ }
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_mcast_remove_list(&remove_list);
+}
+
+static void ipoib_reap_neigh(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
+
+ __ipoib_reap_neigh(priv);
+
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
+}
+
+
+static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_neigh *neigh;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
+ if (!neigh)
+ return NULL;
+
+ neigh->dev = dev;
+ memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
+ skb_queue_head_init(&neigh->queue);
+ INIT_LIST_HEAD(&neigh->list);
+ ipoib_cm_set(neigh, NULL);
+ /* one ref on behalf of the caller */
+ atomic_set(&neigh->refcnt, 1);
+
+ return neigh;
+}
+
+struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh *neigh;
+ u32 hash_val;
+
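+	/* The caller must hold priv->lock; see lockdep_is_held() below. */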
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+ if (!htbl) {
+ neigh = NULL;
+ goto out_unlock;
+ }
+
+	/* We need to add a new neigh, but maybe some other thread succeeded?
+	 * Recalculate the hash; a hash resize may have taken place, so do a search.
+ */
+ hash_val = ipoib_addr_hash(htbl, daddr);
+ for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
+ lockdep_is_held(&priv->lock));
+ neigh != NULL;
+ neigh = rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock))) {
+ if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
+ /* found, take one ref on behalf of the caller */
+ if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ /* deleted */
+ neigh = NULL;
+ break;
+ }
+ neigh->alive = jiffies;
+ goto out_unlock;
+ }
+ }
+
+ neigh = ipoib_neigh_ctor(daddr, dev);
+ if (!neigh)
+ goto out_unlock;
+
+ /* one ref on behalf of the hash table */
+ atomic_inc(&neigh->refcnt);
+ neigh->alive = jiffies;
+ /* put in hash */
+ rcu_assign_pointer(neigh->hnext,
+ rcu_dereference_protected(htbl->buckets[hash_val],
+ lockdep_is_held(&priv->lock)));
+ rcu_assign_pointer(htbl->buckets[hash_val], neigh);
+ atomic_inc(&ntbl->entries);
+
+out_unlock:
+
+ return neigh;
+}
+
+void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
+{
+	/* neigh reference count was dropped to zero */
+ struct net_device *dev = neigh->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sk_buff *skb;
+ if (neigh->ah)
+ ipoib_put_ah(neigh->ah);
+ while ((skb = __skb_dequeue(&neigh->queue))) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
+ if (ipoib_cm_get(neigh))
+ ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
+ ipoib_dbg(ipoib_priv(dev),
+ "neigh free for %06x %pI6\n",
+ IPOIB_QPN(neigh->daddr),
+ neigh->daddr + 4);
+ kfree(neigh);
+ if (atomic_dec_and_test(&priv->ntbl.entries)) {
+ if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
+ complete(&priv->ntbl.flushed);
+ }
+}
+
+static void ipoib_neigh_reclaim(struct rcu_head *rp)
+{
+ /* Called as a result of removal from hash table */
+ struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
+ /* note TX context may hold another ref */
+ ipoib_neigh_put(neigh);
+}
+
+void ipoib_neigh_free(struct ipoib_neigh *neigh)
+{
+ struct net_device *dev = neigh->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh __rcu **np;
+ struct ipoib_neigh *n;
+ u32 hash_val;
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+ if (!htbl)
+ return;
+
+ hash_val = ipoib_addr_hash(htbl, neigh->daddr);
+ np = &htbl->buckets[hash_val];
+ for (n = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock));
+ n != NULL;
+ n = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) {
+ if (n == neigh) {
+ /* found */
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from parent list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ return;
+ } else {
+ np = &n->hnext;
+ }
+ }
+}
+
+static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh __rcu **buckets;
+ u32 size;
+
+ clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
+ ntbl->htbl = NULL;
+ htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ if (!htbl)
+ return -ENOMEM;
+ size = roundup_pow_of_two(arp_tbl.gc_thresh3);
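+	/* Size is a power of two so (size - 1) can serve as the hash mask. */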
+ buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
+ if (!buckets) {
+ kfree(htbl);
+ return -ENOMEM;
+ }
+ htbl->size = size;
+ htbl->mask = (size - 1);
+ htbl->buckets = buckets;
+ RCU_INIT_POINTER(ntbl->htbl, htbl);
+ htbl->ntbl = ntbl;
+ atomic_set(&ntbl->entries, 0);
+
+ /* start garbage collection */
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
+
+ return 0;
+}
+
+static void neigh_hash_free_rcu(struct rcu_head *head)
+{
+ struct ipoib_neigh_hash *htbl = container_of(head,
+ struct ipoib_neigh_hash,
+ rcu);
+ struct ipoib_neigh __rcu **buckets = htbl->buckets;
+ struct ipoib_neigh_table *ntbl = htbl->ntbl;
+
+ kvfree(buckets);
+ kfree(htbl);
+ complete(&ntbl->deleted);
+}
+
+void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long flags;
+ int i;
+
+ /* remove all neigh connected to a given path or mcast */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+
+ if (!htbl)
+ goto out_unlock;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) != NULL) {
+			/* delete neighs belonging to this parent */
+ if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from parent list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ } else {
+ np = &neigh->hnext;
+ }
+
+ }
+ }
+out_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long flags;
+ int i, wait_flushed = 0;
+
+ init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&priv->lock));
+ if (!htbl)
+ goto out_unlock;
+
+ wait_flushed = atomic_read(&priv->ntbl.entries);
+ if (!wait_flushed)
+ goto free_htbl;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&priv->lock))) != NULL) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&priv->lock)));
+ /* remove from path/mc list */
+ list_del_init(&neigh->list);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ }
+ }
+
+free_htbl:
+ rcu_assign_pointer(ntbl->htbl, NULL);
+ call_rcu(&htbl->rcu, neigh_hash_free_rcu);
+
+out_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (wait_flushed)
+ wait_for_completion(&priv->ntbl.flushed);
+}
+
+static void ipoib_neigh_hash_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg(priv, "%s\n", __func__);
+ init_completion(&priv->ntbl.deleted);
+
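+	/* Stop the periodic neigh reaper before the final flush so the two cannot race. */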
+ cancel_delayed_work_sync(&priv->neigh_reap_task);
+
+ ipoib_flush_neighs(priv);
+
+ wait_for_completion(&priv->ntbl.deleted);
+}
+
+static void ipoib_napi_add(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
+ netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
+}
+
+static void ipoib_napi_del(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_del(&priv->recv_napi);
+ netif_napi_del(&priv->send_napi);
+}
+
+static void ipoib_dev_uninit_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_transport_dev_cleanup(dev);
+
+ ipoib_napi_del(dev);
+
+ ipoib_cm_dev_cleanup(dev);
+
+ kfree(priv->rx_ring);
+ vfree(priv->tx_ring);
+
+ priv->rx_ring = NULL;
+ priv->tx_ring = NULL;
+}
+
+static int ipoib_dev_init_default(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_napi_add(dev);
+
+ /* Allocate RX/TX "rings" to hold queued skbs */
+ priv->rx_ring = kcalloc(ipoib_recvq_size,
+ sizeof(*priv->rx_ring),
+ GFP_KERNEL);
+ if (!priv->rx_ring)
+ goto out;
+
+ priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
+ sizeof(*priv->tx_ring)));
+ if (!priv->tx_ring) {
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
+ goto out_rx_ring_cleanup;
+ }
+
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
+
+ if (ipoib_transport_dev_init(dev, priv->ca)) {
+ pr_warn("%s: ipoib_transport_dev_init failed\n",
+ priv->ca->name);
+ goto out_tx_ring_cleanup;
+ }
+
+	/* After the QP is created, set the QP number in the device address. */
+ priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
+ priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
+ priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
+
+ return 0;
+
+out_tx_ring_cleanup:
+ vfree(priv->tx_ring);
+
+out_rx_ring_cleanup:
+ kfree(priv->rx_ring);
+
+out:
+ ipoib_napi_del(dev);
+ return -ENOMEM;
+}
+
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->rn_ops->ndo_do_ioctl)
+ return -EOPNOTSUPP;
+
+ return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
+}
+
+static int ipoib_dev_init(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = -ENOMEM;
+
+ priv->qp = NULL;
+
+ /*
+	 * The various IPoIB tasks assume they will never race against
+	 * themselves, so always use a single-threaded workqueue.
+ */
+ priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
+ if (!priv->wq) {
+ pr_warn("%s: failed to allocate device WQ\n", dev->name);
+ goto out;
+ }
+
+	/* Create the PD, which is used for both the control and data paths. */
+ priv->pd = ib_alloc_pd(priv->ca, 0);
+ if (IS_ERR(priv->pd)) {
+ pr_warn("%s: failed to allocate PD\n", priv->ca->name);
+ goto clean_wq;
+ }
+
+ ret = priv->rn_ops->ndo_init(dev);
+ if (ret) {
+ pr_warn("%s failed to init HW resource\n", dev->name);
+ goto out_free_pd;
+ }
+
+ ret = ipoib_neigh_hash_init(priv);
+ if (ret) {
+ pr_warn("%s failed to init neigh hash\n", dev->name);
+ goto out_dev_uninit;
+ }
+
+ if (dev->flags & IFF_UP) {
+ if (ipoib_ib_dev_open(dev)) {
+ pr_warn("%s failed to open device\n", dev->name);
+ ret = -ENODEV;
+ goto out_hash_uninit;
+ }
+ }
+
+ return 0;
+
+out_hash_uninit:
+ ipoib_neigh_hash_uninit(dev);
+
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
+out_free_pd:
+ if (priv->pd) {
+ ib_dealloc_pd(priv->pd);
+ priv->pd = NULL;
+ }
+
+clean_wq:
+ if (priv->wq) {
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * This must be called before doing an unregister_netdev on a parent device to
+ * shutdown the IB event handler.
+ */
+static void ipoib_parent_unregister_pre(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ /*
+	 * ipoib_set_mac() checks netif_running() before pushing work; clearing
+	 * running ensures it will not add more work.
+ */
+ rtnl_lock();
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
+ rtnl_unlock();
+
+ /* ipoib_event() cannot be running once this returns */
+ ib_unregister_event_handler(&priv->event_handler);
+
+ /*
+ * Work on the queue grabs the rtnl lock, so this cannot be done while
+ * also holding it.
+ */
+ flush_workqueue(ipoib_workqueue);
+}
+
+static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
+{
+ priv->hca_caps = priv->ca->attrs.device_cap_flags;
+
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (priv->hca_caps & IB_DEVICE_UD_TSO)
+ priv->dev->hw_features |= NETIF_F_TSO;
+
+ priv->dev->features |= priv->dev->hw_features;
+ }
+}
+
+static int ipoib_parent_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ib_port_attr attr;
+ int result;
+
+ result = ib_query_port(priv->ca, priv->port, &attr);
+ if (result) {
+ pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
+ priv->port);
+ return result;
+ }
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
+
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
+
+ result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
+ if (result) {
+ pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
+ priv->ca->name, priv->port, result);
+ return result;
+ }
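+	/* The port GID occupies bytes 4..19 of the 20-byte hardware address. */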
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+ sizeof(union ib_gid));
+
+ SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_port = priv->port - 1;
+ /* Let's set this one too for backwards compatibility. */
+ priv->dev->dev_id = priv->port - 1;
+
+ return 0;
+}
+
+static void ipoib_child_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
+ memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
+ sizeof(priv->local_gid));
+ else {
+ memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr,
+ INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid,
+ sizeof(priv->local_gid));
+ }
+}
+
+static int ipoib_ndo_init(struct net_device *ndev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
+
+ if (priv->parent) {
+ ipoib_child_init(ndev);
+ } else {
+ rc = ipoib_parent_init(ndev);
+ if (rc)
+ return rc;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
+ ndev->max_mtu = IPOIB_CM_MTU;
+
+ ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ priv->pkey |= 0x8000;
+
+ ndev->broadcast[8] = priv->pkey >> 8;
+ ndev->broadcast[9] = priv->pkey & 0xff;
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ ipoib_set_dev_features(priv);
+
+ rc = ipoib_dev_init(ndev);
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
+ return rc;
+ }
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ dev_hold(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_add_tail(&priv->list, &ppriv->child_intfs);
+ up_write(&ppriv->vlan_rwsem);
+ }
+
+ return 0;
+}
+
+static void ipoib_ndo_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ASSERT_RTNL();
+
+ /*
+ * ipoib_remove_one guarantees the children are removed before the
+ * parent, and that is the only place where a parent can be removed.
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ }
+
+ ipoib_neigh_hash_uninit(dev);
+
+ ipoib_ib_dev_cleanup(dev);
+
+	/* No more work will run on priv->wq. */
+ if (priv->wq) {
+ /* See ipoib_mcast_carrier_on_task() */
+ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
+
+ if (priv->parent)
+ dev_put(priv->parent);
+}
+
+static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
+}
+
+static int ipoib_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int err;
+
+ err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
+ if (err)
+ return err;
+
+ ivf->vf = vf;
+ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
+
+ return 0;
+}
+
+static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
+ return -EINVAL;
+
+ return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+}
+
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
+static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
+}
+
+static const struct header_ops ipoib_header_ops = {
+ .create = ipoib_hard_header,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_pf = {
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_set_vf_link_state = ipoib_set_vf_link_state,
+ .ndo_get_vf_config = ipoib_get_vf_config,
+ .ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
+ .ndo_set_vf_guid = ipoib_set_vf_guid,
+ .ndo_set_mac_address = ipoib_set_mac,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_do_ioctl = ipoib_ioctl,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_vf = {
+ .ndo_init = ipoib_ndo_init,
+ .ndo_uninit = ipoib_ndo_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_do_ioctl = ipoib_ioctl,
+};
+
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
+
+void ipoib_setup_common(struct net_device *dev)
+{
+ dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
+
+ ipoib_set_ethtool_ops(dev);
+
+ dev->watchdog_timeo = HZ;
+
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+
+ dev->hard_header_len = IPOIB_HARD_LEN;
+ dev->addr_len = INFINIBAND_ALEN;
+ dev->type = ARPHRD_INFINIBAND;
+ dev->tx_queue_len = ipoib_sendq_size * 2;
+ dev->features = (NETIF_F_VLAN_CHALLENGED |
+ NETIF_F_HIGHDMA);
+ netif_keep_dst(dev);
+
+ memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
+
+ /*
+	 * unregister_netdev always frees the netdev; we use this mode
+	 * consistently to unify all the various unregister paths, including
+	 * those connected to rtnl_link_ops, which require it.
+ */
+ dev->needs_free_netdev = true;
+}
+
+static void ipoib_build_priv(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ priv->dev = dev;
+ spin_lock_init(&priv->lock);
+ init_rwsem(&priv->vlan_rwsem);
+ mutex_init(&priv->mcast_mutex);
+
+ INIT_LIST_HEAD(&priv->path_list);
+ INIT_LIST_HEAD(&priv->child_intfs);
+ INIT_LIST_HEAD(&priv->dead_ahs);
+ INIT_LIST_HEAD(&priv->multicast_list);
+
+ INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
+ INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
+ INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
+ INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
+ INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
+ INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
+}
+
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
+ const char *name)
+{
+ struct net_device *dev;
+
+ dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common);
+ if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
+ return dev;
+
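+	/* the device has no dedicated rdma_netdev support; allocate a plain netdev */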
+ dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
+ ipoib_setup_common);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ return dev;
+}
+
+int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
+ struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_dev_priv *priv;
+ int rc;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ca = hca;
+ priv->port = port;
+
+ rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common, dev);
+ if (rc) {
+ if (rc != -EOPNOTSUPP)
+ goto out;
+
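+		/* no hardware-accelerated rdma_netdev; fall back to the generic IPoIB datapath */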
+ rn->send = ipoib_send;
+ rn->attach_mcast = ipoib_mcast_attach;
+ rn->detach_mcast = ipoib_mcast_detach;
+ rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
+ }
+
+ priv->rn_ops = dev->netdev_ops;
+
+ if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION)
+ dev->netdev_ops = &ipoib_netdev_ops_vf;
+ else
+ dev->netdev_ops = &ipoib_netdev_ops_pf;
+
+ rn->clnt_priv = priv;
+ /*
+ * Only the child register_netdev flows can handle priv_destructor
+ * being set, so we force it to NULL here and handle manually until it
+ * is safe to turn on.
+ */
+ priv->next_priv_destructor = dev->priv_destructor;
+ dev->priv_destructor = NULL;
+
+ ipoib_build_priv(dev);
+
+ return 0;
+
+out:
+ kfree(priv);
+ return rc;
+}
+
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+ const char *name)
+{
+ struct net_device *dev;
+ int rc;
+
+ dev = ipoib_alloc_netdev(hca, port, name);
+ if (IS_ERR(dev))
+ return dev;
+
+ rc = ipoib_intf_init(hca, port, name, dev);
+ if (rc) {
+ free_netdev(dev);
+ return ERR_PTR(rc);
+ }
+
+ /*
+	 * Upon success the caller must either call ipoib_intf_free or ensure
+	 * that register_netdevice() succeeded, in which case priv_destructor
+	 * is set to ipoib_intf_free.
+ */
+ return dev;
+}
+
+void ipoib_intf_free(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ dev->priv_destructor = priv->next_priv_destructor;
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+
+ /*
+ * There are some error flows around register_netdev failing that may
+	 * attempt to call priv_destructor twice; prevent that from happening.
+ */
+ dev->priv_destructor = NULL;
+
+ /* unregister/destroy is very complicated. Make bugs more obvious. */
+ rn->clnt_priv = NULL;
+
+ kfree(priv);
+}
+
+static ssize_t show_pkey(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ return sprintf(buf, "0x%04x\n", priv->pkey);
+}
+static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+
+static ssize_t show_umcast(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
+}
+
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+
+ if (umcast_val > 0) {
+ set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+		ipoib_warn(priv, "ignoring multicast groups joined directly by userspace\n");
+ } else
+ clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+}
+
+static ssize_t set_umcast(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
+
+ ipoib_set_umcast(to_net_dev(dev), umcast_val);
+
+ return count;
+}
+static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
+
+int ipoib_add_umcast_attr(struct net_device *dev)
+{
+ return device_create_file(&dev->dev, &dev_attr_umcast);
+}
+
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *netdev = priv->dev;
+
+ netif_addr_lock_bh(netdev);
+
+ memcpy(&priv->local_gid.global.interface_id,
+ &gid->global.interface_id,
+ sizeof(gid->global.interface_id));
+ memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+ clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ netif_addr_unlock_bh(netdev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(child_priv, &priv->child_intfs, list)
+ set_base_guid(child_priv, gid);
+ up_read(&priv->vlan_rwsem);
+ }
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+ struct sockaddr_storage *ss)
+{
+ union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+ int ret = 0;
+
+ netif_addr_lock_bh(dev);
+
+ /* Make sure the QPN, reserved and subnet prefix match the current
+ * lladdr, it also makes sure the lladdr is unicast.
+ */
+ if (memcmp(dev->dev_addr, ss->__data,
+ 4 + sizeof(gid->global.subnet_prefix)) ||
+ gid->global.interface_id == 0)
+ ret = -EINVAL;
+
+ netif_addr_unlock_bh(dev);
+
+ return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct sockaddr_storage *ss = addr;
+ int ret;
+
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+ return -EBUSY;
+
+ ret = ipoib_check_lladdr(dev, ss);
+ if (ret)
+ return ret;
+
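+	/* adopt the new interface GUID and propagate it to any child interfaces */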
+ set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+ queue_work(ipoib_workqueue, &priv->flush_light);
+
+ return 0;
+}
+
+static ssize_t create_child(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int pkey;
+ int ret;
+
+ if (sscanf(buf, "%i", &pkey) != 1)
+ return -EINVAL;
+
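+	/* reject out-of-range values and 0x8000 (membership bit only, invalid base P_Key) */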
+ if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
+ return -EINVAL;
+
+ ret = ipoib_vlan_add(to_net_dev(dev), pkey);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
+
+static ssize_t delete_child(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int pkey;
+ int ret;
+
+ if (sscanf(buf, "%i", &pkey) != 1)
+ return -EINVAL;
+
+ if (pkey < 0 || pkey > 0xffff)
+ return -EINVAL;
+
+ ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
+
+ return ret ? ret : count;
+
+}
+static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
+
+int ipoib_add_pkey_attr(struct net_device *dev)
+{
+ return device_create_file(&dev->dev, &dev_attr_pkey);
+}
+
+/*
+ * We erroneously exposed the iface's port number in the dev_id
+ * sysfs field long after dev_port was introduced for that purpose[1],
+ * and we need to stop everyone from relying on that.
+ * Let's overload the show routine for the dev_id file here
+ * to gently bring the issue up.
+ *
+ * [1] https://www.spinics.net/lists/netdev/msg272123.html
+ */
+static ssize_t dev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+
+ /*
+	 * ndev->dev_port is 0 on old kernels prior to commit 9b8b2a323008
+	 * ("IB/ipoib: Use dev_port to expose network interface port numbers").
+	 * Zero was chosen as a special case so that user space applications
+	 * can fall back and query dev_id to check whether it has a different
+	 * value.
+	 *
+	 * Don't print a warning in that scenario.
+ *
+ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
+ */
+ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+
+ return sprintf(buf, "%#x\n", ndev->dev_id);
+}
+static DEVICE_ATTR_RO(dev_id);
+
+static int ipoib_intercept_dev_id_attr(struct net_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_dev_id);
+ return device_create_file(&dev->dev, &dev_attr_dev_id);
+}
+
+static struct net_device *ipoib_add_port(const char *format,
+ struct ib_device *hca, u8 port)
+{
+ struct rtnl_link_ops *ops = ipoib_get_link_ops();
+ struct rdma_netdev_alloc_params params;
+ struct ipoib_dev_priv *priv;
+ struct net_device *ndev;
+ int result;
+
+ ndev = ipoib_intf_alloc(hca, port, format);
+ if (IS_ERR(ndev)) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
+ PTR_ERR(ndev));
+ return ndev;
+ }
+ priv = ipoib_priv(ndev);
+
+ INIT_IB_EVENT_HANDLER(&priv->event_handler,
+ priv->ca, ipoib_event);
+ ib_register_event_handler(&priv->event_handler);
+
+ /* call event handler to ensure pkey in sync */
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
+
+ result = register_netdev(ndev);
+ if (result) {
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
+
+ ipoib_parent_unregister_pre(ndev);
+ ipoib_intf_free(ndev);
+ free_netdev(ndev);
+
+ return ERR_PTR(result);
+ }
+
+ if (hca->ops.rdma_netdev_get_params) {
+ int rc = hca->ops.rdma_netdev_get_params(hca, port,
+ RDMA_NETDEV_IPOIB,
+ &params);
+
+ if (!rc && ops->priv_size < params.sizeof_priv)
+ ops->priv_size = params.sizeof_priv;
+ }
+ /*
+ * We cannot set priv_destructor before register_netdev because we
+ * need priv to be always valid during the error flow to execute
+ * ipoib_parent_unregister_pre(). Instead handle it manually and only
+ * enter priv_destructor mode once we are completely registered.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ if (ipoib_intercept_dev_id_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_cm_add_mode_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_pkey_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_umcast_attr(ndev))
+ goto sysfs_failed;
+ if (device_create_file(&ndev->dev, &dev_attr_create_child))
+ goto sysfs_failed;
+ if (device_create_file(&ndev->dev, &dev_attr_delete_child))
+ goto sysfs_failed;
+
+ return ndev;
+
+sysfs_failed:
+ ipoib_parent_unregister_pre(ndev);
+ unregister_netdev(ndev);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int ipoib_add_one(struct ib_device *device)
+{
+ struct list_head *dev_list;
+ struct net_device *dev;
+ struct ipoib_dev_priv *priv;
+ unsigned int p;
+ int count = 0;
+
+ dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
+ if (!dev_list)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(dev_list);
+
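+	/* create one ib%d interface for every IB-capable port on this device */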
+ rdma_for_each_port (device, p) {
+ if (!rdma_protocol_ib(device, p))
+ continue;
+ dev = ipoib_add_port("ib%d", device, p);
+ if (!IS_ERR(dev)) {
+ priv = ipoib_priv(dev);
+ list_add_tail(&priv->list, dev_list);
+ count++;
+ }
+ }
+
+ if (!count) {
+ kfree(dev_list);
+ return -EOPNOTSUPP;
+ }
+
+ ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
+}
+
+static void ipoib_remove_one(struct ib_device *device, void *client_data)
+{
+ struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
+ struct list_head *dev_list = client_data;
+
+ list_for_each_entry_safe(priv, tmp, dev_list, list) {
+ LIST_HEAD(head);
+ ipoib_parent_unregister_pre(priv->dev);
+
+ rtnl_lock();
+
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
+ list)
+ unregister_netdevice_queue(cpriv->dev, &head);
+ unregister_netdevice_queue(priv->dev, &head);
+ unregister_netdevice_many(&head);
+
+ rtnl_unlock();
+ }
+
+ kfree(dev_list);
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+ .notifier_call = ipoib_netdev_event,
+};
+#endif
+
+static int __init ipoib_init_module(void)
+{
+ int ret;
+
+ ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
+ ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
+ ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
+
+ ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
+ ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
+ ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+ ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
+#endif
+
+ /*
+ * When copying small received packets, we only copy from the
+ * linear data part of the SKB, so we rely on this condition.
+ */
+ BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
+
+ ipoib_register_debugfs();
+
+ /*
+ * We create a global workqueue here that is used for all flush
+ * operations. However, if you attempt to flush a workqueue
+ * from a task on that same workqueue, it deadlocks the system.
+ * We want to be able to flush the tasks associated with a
+ * specific net device, so we also create a workqueue for each
+ * netdevice. We queue up the tasks for that device only on
+ * its private workqueue, and we only queue up flush events
+ * on our global flush workqueue. This avoids the deadlocks.
+ */
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
+ if (!ipoib_workqueue) {
+ ret = -ENOMEM;
+ goto err_fs;
+ }
+
+ ib_sa_register_client(&ipoib_sa_client);
+
+ ret = ib_register_client(&ipoib_client);
+ if (ret)
+ goto err_sa;
+
+ ret = ipoib_netlink_init();
+ if (ret)
+ goto err_client;
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
+ return 0;
+
+err_client:
+ ib_unregister_client(&ipoib_client);
+
+err_sa:
+ ib_sa_unregister_client(&ipoib_sa_client);
+ destroy_workqueue(ipoib_workqueue);
+
+err_fs:
+ ipoib_unregister_debugfs();
+
+ return ret;
+}
+
+static void __exit ipoib_cleanup_module(void)
+{
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
+ ipoib_netlink_fini();
+ ib_unregister_client(&ipoib_client);
+ ib_sa_unregister_client(&ipoib_sa_client);
+ ipoib_unregister_debugfs();
+ destroy_workqueue(ipoib_workqueue);
+}
+
+module_init(ipoib_init_module);
+module_exit(ipoib_cleanup_module);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
new file mode 100644
index 000000000..86e4ed64e
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -0,0 +1,1062 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/inetdevice.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+
+#include <net/dst.h>
+
+#include "ipoib.h"
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int mcast_debug_level;
+
+module_param(mcast_debug_level, int, 0644);
+MODULE_PARM_DESC(mcast_debug_level,
+ "Enable multicast debug tracing if > 0");
+#endif
+
+struct ipoib_mcast_iter {
+ struct net_device *dev;
+ union ib_gid mgid;
+ unsigned long created;
+ unsigned int queuelen;
+ unsigned int complete;
+ unsigned int send_only;
+};
+
+/* join state that allows creating mcg with sendonly member request */
+#define SENDONLY_FULLMEMBER_JOIN 8
+
+/*
+ * This should be called with the priv->lock held
+ */
+static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
+ struct ipoib_mcast *mcast,
+ bool delay)
+{
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+
+ /*
+ * We will be scheduling *something*, so cancel whatever is
+ * currently scheduled first
+ */
+ cancel_delayed_work(&priv->mcast_task);
+ if (mcast && delay) {
+ /*
+ * We had a failure and want to schedule a retry later
+ */
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ mcast->delay_until = jiffies + (mcast->backoff * HZ);
+ /*
+ * Mark this mcast for its delay, but restart the
+ * task immediately. The join task will make sure to
+ * clear out all entries without delays, and then
+ * schedule itself to run again when the earliest
+ * delay expires
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ } else if (delay) {
+ /*
+ * Special case of retrying after a failure to
+ * allocate the broadcast multicast group, wait
+ * 1 second and try again
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
+ } else
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+}
+
+static void ipoib_mcast_free(struct ipoib_mcast *mcast)
+{
+ struct net_device *dev = mcast->dev;
+ int tx_dropped = 0;
+
+ ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ /* remove all neigh connected to this mcast */
+ ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);
+
+ if (mcast->ah)
+ ipoib_put_ah(mcast->ah);
+
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ ++tx_dropped;
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ }
+
+ netif_tx_lock_bh(dev);
+ dev->stats.tx_dropped += tx_dropped;
+ netif_tx_unlock_bh(dev);
+
+ kfree(mcast);
+}
+
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
+{
+ struct ipoib_mcast *mcast;
+
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+ if (!mcast)
+ return NULL;
+
+ mcast->dev = dev;
+ mcast->created = jiffies;
+ mcast->delay_until = jiffies;
+ mcast->backoff = 1;
+
+ INIT_LIST_HEAD(&mcast->list);
+ INIT_LIST_HEAD(&mcast->neigh_list);
+ skb_queue_head_init(&mcast->pkt_queue);
+
+ return mcast;
+}
+
+static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node *n = priv->multicast_tree.rb_node;
+
+ while (n) {
+ struct ipoib_mcast *mcast;
+ int ret;
+
+ mcast = rb_entry(n, struct ipoib_mcast, rb_node);
+
+ ret = memcmp(mgid, mcast->mcmember.mgid.raw,
+ sizeof (union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ return mcast;
+ }
+
+ return NULL;
+}
+
+static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
+
+ while (*n) {
+ struct ipoib_mcast *tmcast;
+ int ret;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
+
+ ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
+ sizeof (union ib_gid));
+ if (ret < 0)
+ n = &pn->rb_left;
+ else if (ret > 0)
+ n = &pn->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
+
+ return 0;
+}
+
+static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+ struct ib_sa_mcmember_rec *mcmember)
+{
+ struct net_device *dev = mcast->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_ah *ah;
+ struct rdma_ah_attr av;
+ int ret;
+ int set_qkey = 0;
+ int mtu;
+
+ mcast->mcmember = *mcmember;
+
+ /* Set the multicast MTU and cached Q_Key before we attach if it's
+ * the broadcast group.
+ */
+ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid))) {
+ spin_lock_irq(&priv->lock);
+ if (!priv->broadcast) {
+ spin_unlock_irq(&priv->lock);
+ return -EAGAIN;
+ }
+		/* update the cached broadcast members according to the new mcast */
+ priv->broadcast->mcmember.qkey = mcmember->qkey;
+ priv->broadcast->mcmember.mtu = mcmember->mtu;
+ priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
+ priv->broadcast->mcmember.rate = mcmember->rate;
+ priv->broadcast->mcmember.sl = mcmember->sl;
+ priv->broadcast->mcmember.flow_label = mcmember->flow_label;
+ priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
+		/* assume that if the admin and mcast MTUs are equal, both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
+ if (priv->mcast_mtu == priv->admin_mtu)
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
+
+ priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
+ spin_unlock_irq(&priv->lock);
+ priv->tx_wr.remote_qkey = priv->qkey;
+ set_qkey = 1;
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ ipoib_warn(priv, "multicast group %pI6 already attached\n",
+ mcast->mcmember.mgid.raw);
+
+ return 0;
+ }
+
+ ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
+ be16_to_cpu(mcast->mcmember.mlid),
+ set_qkey, priv->qkey);
+ if (ret < 0) {
+ ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
+ return ret;
+ }
+ }
+
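+	/* build the multicast address vector from the returned mcmember record */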
+ memset(&av, 0, sizeof(av));
+ av.type = rdma_ah_find_type(priv->ca, priv->port);
+	rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
+ rdma_ah_set_port_num(&av, priv->port);
+ rdma_ah_set_sl(&av, mcast->mcmember.sl);
+ rdma_ah_set_static_rate(&av, mcast->mcmember.rate);
+
+ rdma_ah_set_grh(&av, &mcast->mcmember.mgid,
+ be32_to_cpu(mcast->mcmember.flow_label),
+ 0, mcast->mcmember.hop_limit,
+ mcast->mcmember.traffic_class);
+
+ ah = ipoib_create_ah(dev, priv->pd, &av);
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
+ }
+ spin_lock_irq(&priv->lock);
+ mcast->ah = ah;
+ spin_unlock_irq(&priv->lock);
+
+ ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
+ mcast->mcmember.mgid.raw,
+ mcast->ah->ah,
+ be16_to_cpu(mcast->mcmember.mlid),
+ mcast->mcmember.sl);
+
+ /* actually send any queued packets */
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+
+ netif_tx_unlock_bh(dev);
+
+ skb->dev = dev;
+
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
+ __func__, ret);
+ netif_tx_lock_bh(dev);
+ }
+ netif_tx_unlock_bh(dev);
+
+ return 0;
+}
+
+void ipoib_mcast_carrier_on_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ carrier_on_task);
+ struct ib_port_attr attr;
+
+ if (ib_query_port(priv->ca, priv->port, &attr) ||
+ attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
+ return;
+ }
+ /*
+	 * Check whether we can send sendonly MCGs with the sendonly-fullmember
+	 * join state. This is done here, after the successful join to the
+	 * broadcast group, because the broadcast group must always be joined
+	 * first and is always re-joined if the SM changes substantially.
+ */
+ priv->sm_fullmember_sendonly_support =
+ ib_sa_sendonly_fullmem_support(&ipoib_sa_client,
+ priv->ca, priv->port);
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_OPER_UP go away as that signals that we are bailing
+ * and can safely ignore the carrier on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+ netif_carrier_on(priv->dev);
+ rtnl_unlock();
+}
+
+static int ipoib_mcast_join_complete(int status,
+ struct ib_sa_multicast *multicast)
+{
+ struct ipoib_mcast *mcast = multicast->context;
+ struct net_device *dev = mcast->dev;
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
+ "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+
+ /* We trap for port events ourselves. */
+ if (status == -ENETRESET) {
+ status = 0;
+ goto out;
+ }
+
+ if (!status)
+ status = ipoib_mcast_join_finish(mcast, &multicast->rec);
+
+ if (!status) {
+ mcast->backoff = 1;
+ mcast->delay_until = jiffies;
+
+ /*
+ * Defer carrier on work to priv->wq to avoid a
+ * deadlock on rtnl_lock here. Requeue our multicast
+ * work too, which will end up happening right after
+ * our carrier on task work and will allow us to
+ * send out all of the non-broadcast joins
+ */
+ if (mcast == priv->broadcast) {
+ spin_lock_irq(&priv->lock);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ goto out_locked;
+ }
+ } else {
+ bool silent_fail =
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ status == -EINVAL;
+
+ if (mcast->logcount < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN ||
+ silent_fail) {
+ ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ }
+
+ if (!silent_fail)
+ mcast->logcount++;
+ }
+
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ mcast->backoff >= 2) {
+ /*
+ * We only retry sendonly joins once before we drop
+ * the packet and quit trying to deal with the
+ * group. However, we leave the group in the
+ * mcast list as an unjoined group. If we want to
+ * try joining again, we simply queue up a packet
+ * and restart the join thread. The empty queue
+ * is why the join thread ignores this group.
+ */
+ mcast->backoff = 1;
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ }
+ netif_tx_unlock_bh(dev);
+ } else {
+ spin_lock_irq(&priv->lock);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ goto out_locked;
+ }
+ }
+out:
+ spin_lock_irq(&priv->lock);
+out_locked:
+ /*
+ * Make sure to set mcast->mc before we clear the busy flag to avoid
+ * racing with code that checks for BUSY before checking mcast->mc
+ */
+ if (status)
+ mcast->mc = NULL;
+ else
+ mcast->mc = multicast;
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ spin_unlock_irq(&priv->lock);
+ complete(&mcast->done);
+
+ return status;
+}
+
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_sa_multicast *multicast;
+ struct ib_sa_mcmember_rec rec = {
+ .join_state = 1
+ };
+ ib_sa_comp_mask comp_mask;
+ int ret = 0;
+
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+ ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
+
+ rec.mgid = mcast->mcmember.mgid;
+ rec.port_gid = priv->local_gid;
+ rec.pkey = cpu_to_be16(priv->pkey);
+
+ comp_mask =
+ IB_SA_MCMEMBER_REC_MGID |
+ IB_SA_MCMEMBER_REC_PORT_GID |
+ IB_SA_MCMEMBER_REC_PKEY |
+ IB_SA_MCMEMBER_REC_JOIN_STATE;
+
+ if (mcast != priv->broadcast) {
+ /*
+ * RFC 4391:
+ * The MGID MUST use the same P_Key, Q_Key, SL, MTU,
+ * and HopLimit as those used in the broadcast-GID. The rest
+ * of attributes SHOULD follow the values used in the
+ * broadcast-GID as well.
+ */
+ comp_mask |=
+ IB_SA_MCMEMBER_REC_QKEY |
+ IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+ IB_SA_MCMEMBER_REC_MTU |
+ IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
+ IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+ IB_SA_MCMEMBER_REC_RATE |
+ IB_SA_MCMEMBER_REC_SL |
+ IB_SA_MCMEMBER_REC_FLOW_LABEL |
+ IB_SA_MCMEMBER_REC_HOP_LIMIT;
+
+ rec.qkey = priv->broadcast->mcmember.qkey;
+ rec.mtu_selector = IB_SA_EQ;
+ rec.mtu = priv->broadcast->mcmember.mtu;
+ rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ rec.rate_selector = IB_SA_EQ;
+ rec.rate = priv->broadcast->mcmember.rate;
+ rec.sl = priv->broadcast->mcmember.sl;
+ rec.flow_label = priv->broadcast->mcmember.flow_label;
+ rec.hop_limit = priv->broadcast->mcmember.hop_limit;
+
+ /*
+ * Send-only IB Multicast joins work at the core IB layer but
+ * require specific SM support.
+ * We can use such joins here only if the current SM supports that feature.
+		 * If it does not, we emulate an Ethernet multicast send,
+ * which does not require a multicast subscription and will
+ * still send properly. The most appropriate thing to
+ * do is to create the group if it doesn't exist as that
+ * most closely emulates the behavior, from a user space
+ * application perspective, of Ethernet multicast operation.
+ */
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ priv->sm_fullmember_sendonly_support)
+ /* SM supports sendonly-fullmember, otherwise fallback to full-member */
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
+ }
+ spin_unlock_irq(&priv->lock);
+
+ multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
+ &rec, comp_mask, GFP_KERNEL,
+ ipoib_mcast_join_complete, mcast);
+ spin_lock_irq(&priv->lock);
+ if (IS_ERR(multicast)) {
+ ret = PTR_ERR(multicast);
+ ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ spin_unlock_irq(&priv->lock);
+ complete(&mcast->done);
+ spin_lock_irq(&priv->lock);
+ }
+ return 0;
+}
+
+void ipoib_mcast_join_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, mcast_task.work);
+ struct net_device *dev = priv->dev;
+ struct ib_port_attr port_attr;
+ unsigned long delay_until = 0;
+ struct ipoib_mcast *mcast = NULL;
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+
+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+ ipoib_dbg(priv, "ib_query_port() failed\n");
+ return;
+ }
+ if (port_attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
+ port_attr.state);
+ return;
+ }
+ priv->local_lid = port_attr.lid;
+ netif_addr_lock_bh(dev);
+
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ netif_addr_unlock_bh(dev);
+ return;
+ }
+ netif_addr_unlock_bh(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ goto out;
+
+ if (!priv->broadcast) {
+ struct ipoib_mcast *broadcast;
+
+ broadcast = ipoib_mcast_alloc(dev);
+ if (!broadcast) {
+ ipoib_warn(priv, "failed to allocate broadcast group\n");
+ /*
+ * Restart us after a 1 second delay to retry
+ * creating our broadcast group and attaching to
+ * it. Until this succeeds, this ipoib dev is
+ * completely stalled (multicast wise).
+ */
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 1);
+ goto out;
+ }
+
+ memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid));
+ priv->broadcast = broadcast;
+
+ __ipoib_mcast_add(dev, priv->broadcast);
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
+ mcast = priv->broadcast;
+ if (mcast->backoff > 1 &&
+ time_before(jiffies, mcast->delay_until)) {
+ delay_until = mcast->delay_until;
+ mcast = NULL;
+ }
+ }
+ goto out;
+ }
+
+ /*
+ * We'll never get here until the broadcast group is both allocated
+ * and attached
+ */
+ list_for_each_entry(mcast, &priv->multicast_list, list) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
+ !skb_queue_empty(&mcast->pkt_queue))) {
+ if (mcast->backoff == 1 ||
+ time_after_eq(jiffies, mcast->delay_until)) {
+ /* Found the next unjoined group */
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
+ } else if (!delay_until ||
+ time_before(mcast->delay_until, delay_until))
+ delay_until = mcast->delay_until;
+ }
+ }
+
+ mcast = NULL;
+ ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");
+
+out:
+ if (delay_until) {
+ cancel_delayed_work(&priv->mcast_task);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ delay_until - jiffies);
+ }
+ if (mcast)
+ ipoib_mcast_join(dev, mcast);
+
+ spin_unlock_irq(&priv->lock);
+}
+
+void ipoib_mcast_start_thread(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ unsigned long flags;
+
+ ipoib_dbg_mcast(priv, "starting multicast thread\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void ipoib_mcast_stop_thread(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ ipoib_dbg_mcast(priv, "stopping multicast thread\n");
+
+ cancel_delayed_work_sync(&priv->mcast_task);
+}
+
+static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ int ret = 0;
+
+ if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
+ ib_sa_free_multicast(mcast->mc);
+
+ if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ /* Remove ourselves from the multicast group */
+ ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
+ be16_to_cpu(mcast->mcmember.mlid));
+ if (ret)
+ ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
+ } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+		ipoib_dbg(priv, "leaving with no mcmember but not a SENDONLY join\n");
+
+ return 0;
+}
+
+/*
+ * Check if the multicast group is sendonly. If so remove it from the maps
+ * and add to the remove list
+ */
+void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
+ struct list_head *remove_list)
+{
+	/* Is this a multicast GID? */
+ if (*mgid == 0xff) {
+ struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);
+
+ if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, remove_list);
+ }
+ }
+}
+
+void ipoib_mcast_remove_list(struct list_head *remove_list)
+{
+ struct ipoib_mcast *mcast, *tmcast;
+
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
+ ipoib_mcast_leave(mcast->dev, mcast);
+ ipoib_mcast_free(mcast);
+ }
+}
+
+void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
+ struct ipoib_mcast *mcast;
+ unsigned long flags;
+ void *mgid = daddr + 4;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
+ !priv->broadcast ||
+ !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
+ mcast = __ipoib_mcast_find(dev, mgid);
+ if (!mcast || !mcast->ah) {
+ if (!mcast) {
+ /* Let's create a new send only group now */
+ ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
+ mgid);
+
+ mcast = ipoib_mcast_alloc(dev);
+ if (!mcast) {
+				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
+ set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
+ memcpy(mcast->mcmember.mgid.raw, mgid,
+ sizeof (union ib_gid));
+ __ipoib_mcast_add(dev, mcast);
+ list_add_tail(&mcast->list, &priv->multicast_list);
+ }
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
+ skb_queue_tail(&mcast->pkt_queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
+ if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ }
+ } else {
+ struct ipoib_neigh *neigh;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ neigh = ipoib_neigh_get(dev, daddr);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!neigh) {
+ neigh = ipoib_neigh_alloc(daddr, dev);
+ /* Make sure that the neigh will be added only
+			 * once to the mcast list.
+ */
+ if (neigh && list_empty(&neigh->list)) {
+ kref_get(&mcast->ah->ref);
+ neigh->ah = mcast->ah;
+ neigh->ah->valid = 1;
+ list_add_tail(&neigh->list, &mcast->neigh_list);
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
+ IB_MULTICAST_QPN);
+ if (neigh)
+ ipoib_neigh_put(neigh);
+ return;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void ipoib_mcast_dev_flush(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ LIST_HEAD(remove_list);
+ struct ipoib_mcast *mcast, *tmcast;
+ unsigned long flags;
+
+ mutex_lock(&priv->mcast_mutex);
+ ipoib_dbg_mcast(priv, "flushing multicast list\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, &remove_list);
+ }
+
+ if (priv->broadcast) {
+ rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&priv->broadcast->list, &remove_list);
+ priv->broadcast = NULL;
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ipoib_mcast_remove_list(&remove_list);
+ mutex_unlock(&priv->mcast_mutex);
+}
+
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
+{
+ /* reserved QPN, prefix, scope */
+ if (memcmp(addr, broadcast, 6))
+ return 0;
+ /* signature lower, pkey */
+ if (memcmp(addr + 7, broadcast + 7, 3))
+ return 0;
+ return 1;
+}
+
+void ipoib_mcast_restart_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, restart_task);
+ struct net_device *dev = priv->dev;
+ struct netdev_hw_addr *ha;
+ struct ipoib_mcast *mcast, *tmcast;
+ LIST_HEAD(remove_list);
+ struct ib_sa_mcmember_rec rec;
+
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ /*
+ * shortcut...on shutdown flush is called next, just
+ * let it do all the work
+ */
+ return;
+
+ ipoib_dbg_mcast(priv, "restarting multicast task\n");
+
+ netif_addr_lock_bh(dev);
+ spin_lock_irq(&priv->lock);
+
+ /*
+ * Unfortunately, the networking core only gives us a list of all of
+ * the multicast hardware addresses. We need to figure out which ones
+ * are new and which ones have been removed
+ */
+
+ /* Clear out the found flag */
+ list_for_each_entry(mcast, &priv->multicast_list, list)
+ clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
+
+ /* Mark all of the entries that are found or don't exist */
+ netdev_for_each_mc_addr(ha, dev) {
+ union ib_gid mgid;
+
+ if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
+ continue;
+
+ memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));
+
+ mcast = __ipoib_mcast_find(dev, &mgid);
+ if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ struct ipoib_mcast *nmcast;
+
+ /* ignore group which is directly joined by userspace */
+ if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
+ !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
+ ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
+ mgid.raw);
+ continue;
+ }
+
+ /* Not found or send-only group, let's add a new entry */
+ ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
+ mgid.raw);
+
+ nmcast = ipoib_mcast_alloc(dev);
+ if (!nmcast) {
+ ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
+ continue;
+ }
+
+ set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);
+
+ nmcast->mcmember.mgid = mgid;
+
+ if (mcast) {
+ /* Destroy the send only entry */
+ list_move_tail(&mcast->list, &remove_list);
+
+ rb_replace_node(&mcast->rb_node,
+ &nmcast->rb_node,
+ &priv->multicast_tree);
+ } else
+ __ipoib_mcast_add(dev, nmcast);
+
+ list_add_tail(&nmcast->list, &priv->multicast_list);
+ }
+
+ if (mcast)
+ set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
+ }
+
+	/* Remove all of the entries that no longer exist */
+ list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+ if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
+ !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
+ mcast->mcmember.mgid.raw);
+
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+
+ /* Move to the remove list */
+ list_move_tail(&mcast->list, &remove_list);
+ }
+ }
+
+ spin_unlock_irq(&priv->lock);
+ netif_addr_unlock_bh(dev);
+
+ ipoib_mcast_remove_list(&remove_list);
+
+ /*
+ * Double check that we are still up
+ */
+ if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+ spin_lock_irq(&priv->lock);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irq(&priv->lock);
+ }
+}
+
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
+struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
+{
+ struct ipoib_mcast_iter *iter;
+
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
+ iter->dev = dev;
+ memset(iter->mgid.raw, 0, 16);
+
+ if (ipoib_mcast_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
+ struct rb_node *n;
+ struct ipoib_mcast *mcast;
+ int ret = 1;
+
+ spin_lock_irq(&priv->lock);
+
+ n = rb_first(&priv->multicast_tree);
+
+ while (n) {
+ mcast = rb_entry(n, struct ipoib_mcast, rb_node);
+
+ if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
+ sizeof (union ib_gid)) < 0) {
+ iter->mgid = mcast->mcmember.mgid;
+ iter->created = mcast->created;
+ iter->queuelen = skb_queue_len(&mcast->pkt_queue);
+ iter->complete = !!mcast->ah;
+ iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
+
+ ret = 0;
+
+ break;
+ }
+
+ n = rb_next(n);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
+ union ib_gid *mgid,
+ unsigned long *created,
+ unsigned int *queuelen,
+ unsigned int *complete,
+ unsigned int *send_only)
+{
+ *mgid = iter->mgid;
+ *created = iter->created;
+ *queuelen = iter->queuelen;
+ *complete = iter->complete;
+ *send_only = iter->send_only;
+}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
new file mode 100644
index 000000000..28e9b7084
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. - All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
+#include <linux/module.h>
+#include <net/rtnetlink.h>
+#include "ipoib.h"
+
+static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+ [IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
+ [IFLA_IPOIB_MODE] = { .type = NLA_U16 },
+ [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
+};
+
+static unsigned int ipoib_get_max_num_queues(void)
+{
+ return min_t(unsigned int, num_possible_cpus(), 128);
+}
+
+static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ u16 val;
+
+ if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
+ goto nla_put_failure;
+
+ val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
+ goto nla_put_failure;
+
+ val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+ if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u16 mode, umcast;
+ int ret = 0;
+
+ if (data[IFLA_IPOIB_MODE]) {
+ mode = nla_get_u16(data[IFLA_IPOIB_MODE]);
+ if (mode == IPOIB_MODE_DATAGRAM)
+ ret = ipoib_set_mode(dev, "datagram\n");
+ else if (mode == IPOIB_MODE_CONNECTED)
+ ret = ipoib_set_mode(dev, "connected\n");
+ else
+ ret = -EINVAL;
+
+ if (ret < 0)
+ goto out_err;
+ }
+
+ if (data[IFLA_IPOIB_UMCAST]) {
+ umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
+ ipoib_set_umcast(dev, umcast);
+ }
+
+out_err:
+ return ret;
+}
+
+static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *pdev;
+ struct ipoib_dev_priv *ppriv;
+ u16 child_pkey;
+ int err;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!pdev || pdev->type != ARPHRD_INFINIBAND)
+ return -ENODEV;
+
+ ppriv = ipoib_priv(pdev);
+
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
+ ipoib_warn(ppriv, "child creation disallowed for child devices\n");
+ return -EINVAL;
+ }
+
+ if (!data || !data[IFLA_IPOIB_PKEY]) {
+ ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
+ child_pkey = ppriv->pkey;
+ } else
+ child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+
+ err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
+ if (err) {
+ ipoib_warn(ppriv, "failed to initialize pkey device\n");
+ return err;
+ }
+
+ err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
+ child_pkey, IPOIB_RTNL_CHILD);
+ if (err)
+ return err;
+
+ if (data) {
+ err = ipoib_changelink(dev, tb, data, extack);
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->parent)
+ return;
+
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t ipoib_get_size(const struct net_device *dev)
+{
+ return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
+ nla_total_size(2) + /* IFLA_IPOIB_MODE */
+ nla_total_size(2); /* IFLA_IPOIB_UMCAST */
+}
+
+static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ .kind = "ipoib",
+ .netns_refund = true,
+ .maxtype = IFLA_IPOIB_MAX,
+ .policy = ipoib_policy,
+ .priv_size = sizeof(struct ipoib_dev_priv),
+ .setup = ipoib_setup_common,
+ .newlink = ipoib_new_child_link,
+ .dellink = ipoib_del_child_link,
+ .changelink = ipoib_changelink,
+ .get_size = ipoib_get_size,
+ .fill_info = ipoib_fill_info,
+ .get_num_rx_queues = ipoib_get_max_num_queues,
+ .get_num_tx_queues = ipoib_get_max_num_queues,
+};
+
+struct rtnl_link_ops *ipoib_get_link_ops(void)
+{
+ return &ipoib_link_ops;
+}
+
+int __init ipoib_netlink_init(void)
+{
+ return rtnl_link_register(&ipoib_link_ops);
+}
+
+void __exit ipoib_netlink_fini(void)
+{
+ rtnl_link_unregister(&ipoib_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("ipoib");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
new file mode 100644
index 000000000..587252fd6
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "ipoib.h"
+
+int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_attr *qp_attr = NULL;
+ int ret;
+ u16 pkey_index;
+
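+	/* the interface P_Key must be present in the local port's P_Key table */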
+ if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ ret = -ENXIO;
+ goto out;
+ }
+ set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+
+ if (set_qkey) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto out;
+
+ /* set correct QKey for QP */
+ qp_attr->qkey = qkey;
+ ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* attach QP to multicast group */
+ ret = ib_attach_mcast(priv->qp, mgid, mlid);
+ if (ret)
+ ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
+
+out:
+ kfree(qp_attr);
+ return ret;
+}
+
+int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *mgid, u16 mlid)
+{
+	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+	return ib_detach_mcast(priv->qp, mgid, mlid);
+}
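+
+/*
+ * Illustrative sketch only (not driver code): roughly how the multicast join
+ * and leave paths in ipoib_multicast.c are expected to use the two helpers
+ * above, with the group GID/LID taken from the mcmember record returned by
+ * the SA.  The variable names here are approximations of that caller.
+ *
+ *	ipoib_mcast_attach(dev, priv->ca, &mcast->mcmember.mgid,
+ *			   be16_to_cpu(mcast->mcmember.mlid),
+ *			   set_qkey, priv->qkey);
+ *
+ *	ipoib_mcast_detach(dev, priv->ca, &mcast->mcmember.mgid,
+ *			   be16_to_cpu(mcast->mcmember.mlid));
+ */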
+
+int ipoib_init_qp(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret;
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+
+ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
+ return -1;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ qp_attr.qkey = 0;
+ qp_attr.port_num = priv->port;
+ qp_attr.pkey_index = priv->pkey_index;
+ attr_mask =
+ IB_QP_QKEY |
+ IB_QP_PORT |
+ IB_QP_PKEY_INDEX |
+ IB_QP_STATE;
+ ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
+ goto out_fail;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTR;
+	/* Can't set this in an INIT->RTR transition */
+ attr_mask &= ~IB_QP_PORT;
+ ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
+ goto out_fail;
+ }
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ qp_attr.sq_psn = 0;
+ attr_mask |= IB_QP_SQ_PSN;
+ attr_mask &= ~IB_QP_PKEY_INDEX;
+ ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ qp_attr.qp_state = IB_QPS_RESET;
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
+ ipoib_warn(priv, "Failed to modify QP to RESET state\n");
+
+ return ret;
+}
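+
+/*
+ * For reference, a condensed view of the attribute masks ipoib_init_qp()
+ * above uses for the UD QP state transitions.  This only mirrors the code;
+ * it adds no driver logic.
+ *
+ *	RESET -> INIT: IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY
+ *	INIT  -> RTR:  IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY
+ *	RTR   -> RTS:  IB_QP_STATE | IB_QP_QKEY | IB_QP_SQ_PSN
+ */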
+
+int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ib_qp_init_attr init_attr = {
+ .cap = {
+ .max_send_wr = ipoib_sendq_size,
+ .max_recv_wr = ipoib_recvq_size,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
+ MAX_SKB_FRAGS + 1),
+ .max_recv_sge = IPOIB_UD_RX_SG
+ },
+ .sq_sig_type = IB_SIGNAL_ALL_WR,
+ .qp_type = IB_QPT_UD
+ };
+ struct ib_cq_init_attr cq_attr = {};
+
+ int ret, size, req_vec;
+ int i;
+
+ size = ipoib_recvq_size + 1;
+ ret = ipoib_cm_dev_init(dev);
+ if (!ret) {
+ size += ipoib_sendq_size;
+ if (ipoib_cm_has_srq(dev))
+ size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
+ else
+ size += ipoib_recvq_size * ipoib_max_conn_qp;
+	} else if (ret != -EOPNOTSUPP) {
+		return ret;
+	}
+
+ req_vec = (priv->port - 1) * 2;
+
+ cq_attr.cqe = size;
+ cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
+ priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
+ priv, &cq_attr);
+ if (IS_ERR(priv->recv_cq)) {
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
+ goto out_cm_dev_cleanup;
+ }
+
+ cq_attr.cqe = ipoib_sendq_size;
+ cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
+ priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
+ priv, &cq_attr);
+ if (IS_ERR(priv->send_cq)) {
+ pr_warn("%s: failed to create send CQ\n", ca->name);
+ goto out_free_recv_cq;
+ }
+
+ if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
+ goto out_free_send_cq;
+
+ init_attr.send_cq = priv->send_cq;
+ init_attr.recv_cq = priv->recv_cq;
+
+ if (priv->hca_caps & IB_DEVICE_UD_TSO)
+ init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
+
+ if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
+ init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+
+ if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
+ init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+
+ if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
+ priv->qp = ib_create_qp(priv->pd, &init_attr);
+ if (IS_ERR(priv->qp)) {
+ pr_warn("%s: failed to create QP\n", ca->name);
+ goto out_free_send_cq;
+ }
+
+	if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+		goto out_destroy_qp;
+
+ for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
+ priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
+
+ priv->tx_wr.wr.opcode = IB_WR_SEND;
+ priv->tx_wr.wr.sg_list = priv->tx_sge;
+ priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED;
+
+ priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;
+
+ priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr.num_sge = 1;
+
+ priv->rx_wr.next = NULL;
+ priv->rx_wr.sg_list = priv->rx_sge;
+
+ if (init_attr.cap.max_send_sge > 1)
+ dev->features |= NETIF_F_SG;
+
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
+ return 0;
+
+out_destroy_qp:
+	/* The QP must be destroyed before its CQs */
+	ib_destroy_qp(priv->qp);
+	priv->qp = NULL;
+
+out_free_send_cq:
+	ib_destroy_cq(priv->send_cq);
+
+out_free_recv_cq:
+ ib_destroy_cq(priv->recv_cq);
+
+out_cm_dev_cleanup:
+ ipoib_cm_dev_cleanup(dev);
+
+ return -ENODEV;
+}
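+
+/*
+ * Worked example of the receive CQ sizing in ipoib_transport_dev_init()
+ * above, using the usual module defaults as hypothetical inputs
+ * (ipoib_recvq_size = 256, ipoib_sendq_size = 128, ipoib_max_conn_qp = 128;
+ * the module parameters actually in effect may differ):
+ *
+ *	UD only (CM init returns -EOPNOTSUPP): 256 + 1                =   257
+ *	CM with SRQ:                           257 + 128 + 256 + 1    =   642
+ *	CM without SRQ:                        257 + 128 + 256 * 128  = 33153
+ */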
+
+void ipoib_transport_dev_cleanup(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->qp) {
+ if (ib_destroy_qp(priv->qp))
+			ipoib_warn(priv, "ib_destroy_qp failed\n");
+
+ priv->qp = NULL;
+ }
+
+ ib_destroy_cq(priv->send_cq);
+ ib_destroy_cq(priv->recv_cq);
+}
+
+void ipoib_event(struct ib_event_handler *handler,
+ struct ib_event *record)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(handler, struct ipoib_dev_priv, event_handler);
+
+ if (record->element.port_num != priv->port)
+ return;
+
+ ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
+ dev_name(&record->device->dev), record->element.port_num);
+
+ if (record->event == IB_EVENT_CLIENT_REREGISTER) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ } else if (record->event == IB_EVENT_PORT_ERR ||
+ record->event == IB_EVENT_PORT_ACTIVE ||
+ record->event == IB_EVENT_LID_CHANGE) {
+ queue_work(ipoib_workqueue, &priv->flush_normal);
+ } else if (record->event == IB_EVENT_PKEY_CHANGE) {
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+ } else if (record->event == IB_EVENT_GID_CHANGE &&
+ !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ }
+}
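+
+/*
+ * Illustrative sketch only: ipoib_event() is hooked up as an IB event handler
+ * elsewhere in the driver (ipoib_main.c), roughly along these lines; this is
+ * an approximation of that registration, not code added here.
+ *
+ *	INIT_IB_EVENT_HANDLER(&priv->event_handler, priv->ca, ipoib_event);
+ *	ib_register_event_handler(&priv->event_handler);
+ *	...
+ *	ib_unregister_event_handler(&priv->event_handler);
+ */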
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
new file mode 100644
index 000000000..4c50a87ed
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+
+#include <linux/init.h>
+#include <linux/seq_file.h>
+
+#include <linux/uaccess.h>
+
+#include "ipoib.h"
+
+static ssize_t show_parent(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return sprintf(buf, "%s\n", priv->parent->name);
+}
+static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
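+
+/*
+ * Usage note (illustrative): the attribute above is created on legacy child
+ * interfaces, so, assuming a child named ib0.8001 (per the "%s.%04x" naming
+ * used below), reading /sys/class/net/ib0.8001/parent returns the parent
+ * device name ("ib0").
+ */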
+
+static bool is_child_unique(struct ipoib_dev_priv *ppriv,
+ struct ipoib_dev_priv *priv)
+{
+ struct ipoib_dev_priv *tpriv;
+
+ ASSERT_RTNL();
+
+	/*
+	 * The legacy sysfs interface identifies the child to delete by pkey,
+	 * so it cannot support more than one interface with the same pkey;
+	 * that would be ambiguous. The RTNL interface deletes by netdev and
+	 * therefore has no problem with duplicate pkeys.
+	 */
+ if (priv->child_type != IPOIB_LEGACY_CHILD)
+ return true;
+
+ /*
+	 * First ensure this isn't a duplicate: check the parent device and
+	 * then every legacy child interface to make sure the requested pkey
+	 * is not already in use.
+ */
+ if (ppriv->pkey == priv->pkey)
+ return false;
+
+ list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+ if (tpriv->pkey == priv->pkey &&
+ tpriv->child_type == IPOIB_LEGACY_CHILD)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * NOTE: If this function fails, priv->dev remains valid, but priv will have
+ * been freed and must not be touched by the caller in the error case.
+ *
+ * If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
+ * free the net_device (just as rtnl_newlink does); otherwise the net_device
+ * will be freed when the rtnl lock is released.
+ */
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+ u16 pkey, int type)
+{
+ struct net_device *ndev = priv->dev;
+ int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
+
+ ASSERT_RTNL();
+
+ /*
+ * We do not need to touch priv if register_netdevice fails, so just
+ * always use this flow.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ /*
+ * Racing with unregister of the parent must be prevented by the
+ * caller.
+ */
+ WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED);
+
+ if (pkey == 0 || pkey == 0x8000) {
+ result = -EINVAL;
+ goto out_early;
+ }
+
+ rn->mtu = priv->mcast_mtu;
+
+ priv->parent = ppriv->dev;
+ priv->pkey = pkey;
+ priv->child_type = type;
+
+ if (!is_child_unique(ppriv, priv)) {
+ result = -ENOTUNIQ;
+ goto out_early;
+ }
+
+ result = register_netdevice(ndev);
+ if (result) {
+		ipoib_warn(priv, "failed to initialize; error %i\n", result);
+
+ /*
+ * register_netdevice sometimes calls priv_destructor,
+ * sometimes not. Make sure it was done.
+ */
+ goto out_early;
+ }
+
+	/* RTNL children don't need the legacy sysfs entries */
+ if (type == IPOIB_LEGACY_CHILD) {
+ if (ipoib_cm_add_mode_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_pkey_attr(ndev))
+ goto sysfs_failed;
+ if (ipoib_add_umcast_attr(ndev))
+ goto sysfs_failed;
+
+ if (device_create_file(&ndev->dev, &dev_attr_parent))
+ goto sysfs_failed;
+ }
+
+ return 0;
+
+sysfs_failed:
+ unregister_netdevice(priv->dev);
+ return -ENOMEM;
+
+out_early:
+ if (ndev->priv_destructor)
+ ndev->priv_destructor(ndev);
+ return result;
+}
+
+int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+{
+ struct ipoib_dev_priv *ppriv, *priv;
+ char intf_name[IFNAMSIZ];
+ struct net_device *ndev;
+ int result;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (pdev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ppriv = ipoib_priv(pdev);
+
+ snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+ ppriv->dev->name, pkey);
+
+ ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (IS_ERR(ndev)) {
+ result = PTR_ERR(ndev);
+ goto out;
+ }
+ priv = ipoib_priv(ndev);
+
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
+
+ result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+
+ if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(ndev);
+
+out:
+ rtnl_unlock();
+
+ return result;
+}
+
+struct ipoib_vlan_delete_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+/*
+ * sysfs callbacks of a netdevice cannot obtain the rtnl lock as
+ * unregister_netdev ultimately deletes the sysfs files while holding the rtnl
+ * lock. This deadlocks the system.
+ *
+ * A callback can use rtnl_trylock to avoid the deadlock but it cannot call
+ * unregister_netdev as that internally takes and releases the rtnl_lock. So
+ * instead we find the netdev to unregister and then do the actual unregister
+ * from the global work queue where we can obtain the rtnl_lock safely.
+ */
+static void ipoib_vlan_delete_task(struct work_struct *work)
+{
+ struct ipoib_vlan_delete_work *pwork =
+ container_of(work, struct ipoib_vlan_delete_work, work);
+ struct net_device *dev = pwork->dev;
+
+ rtnl_lock();
+
+ /* Unregistering tasks can race with another task or parent removal */
+ if (dev->reg_state == NETREG_REGISTERED) {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
+ }
+
+ rtnl_unlock();
+
+ kfree(pwork);
+}
+
+int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
+{
+ struct ipoib_dev_priv *ppriv, *priv, *tpriv;
+ int rc;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (pdev->reg_state != NETREG_REGISTERED) {
+ rtnl_unlock();
+ return -EPERM;
+ }
+
+ ppriv = ipoib_priv(pdev);
+
+ rc = -ENODEV;
+ list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
+ if (priv->pkey == pkey &&
+ priv->child_type == IPOIB_LEGACY_CHILD) {
+ struct ipoib_vlan_delete_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del_init(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ work->dev = priv->dev;
+ INIT_WORK(&work->work, ipoib_vlan_delete_task);
+ queue_work(ipoib_workqueue, &work->work);
+
+ rc = 0;
+ break;
+ }
+ }
+
+out:
+ rtnl_unlock();
+
+ return rc;
+}
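+
+/*
+ * Usage note (illustrative, see Documentation/infiniband/ipoib.rst): the two
+ * entry points above back the legacy create_child/delete_child sysfs
+ * attributes of the parent device, e.g.
+ *
+ *	echo 0x8001 > /sys/class/net/ib0/create_child
+ *	echo 0x8001 > /sys/class/net/ib0/delete_child
+ *
+ * which create and remove the child interface ib0.8001 for pkey 0x8001.
+ */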