Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c               6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h               6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c         36
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c            4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_hw_counters.c   2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_hw_counters.h   2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h           5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c           18
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mw.c            2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c          57
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c          4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c           54
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c          89
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c         39
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.c          4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c       231
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h         7
17 files changed, 265 insertions(+), 301 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 6f9ec8db01..255677bc12 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -162,8 +162,6 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
-
- rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
}
/* called by ifc layer to create new rxe device.
@@ -183,7 +181,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
int err = 0;
if (is_vlan_dev(ndev)) {
- rxe_err("rxe creation allowed on top of a real device only");
+ rxe_err("rxe creation allowed on top of a real device only\n");
err = -EPERM;
goto err;
}
@@ -191,7 +189,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
rxe = rxe_get_dev_from_net(ndev);
if (rxe) {
ib_device_put(&rxe->ib_dev);
- rxe_err_dev(rxe, "already configured on %s", ndev->name);
+ rxe_err_dev(rxe, "already configured on %s\n", ndev->name);
err = -EEXIST;
goto err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index d33dd6cf83..d8fb2c7af3 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -38,7 +38,7 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
@@ -58,7 +58,7 @@
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
+#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
@@ -79,7 +79,7 @@
#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
+#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index acd2172bf0..d48af21807 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -122,25 +122,16 @@ void retransmit_timer(struct timer_list *t)
spin_lock_irqsave(&qp->state_lock, flags);
if (qp->valid) {
qp->comp.timeout = 1;
- rxe_sched_task(&qp->comp.task);
+ rxe_sched_task(&qp->send_task);
}
spin_unlock_irqrestore(&qp->state_lock, flags);
}
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
- int must_sched;
-
- must_sched = skb_queue_len(&qp->resp_pkts) > 0;
- if (must_sched != 0)
- rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
-
+ rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
skb_queue_tail(&qp->resp_pkts, skb);
-
- if (must_sched)
- rxe_sched_task(&qp->comp.task);
- else
- rxe_run_task(&qp->comp.task);
+ rxe_sched_task(&qp->send_task);
}
static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -325,7 +316,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
return COMPST_ERROR_RETRY;
@@ -433,7 +424,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
} else {
if (wqe->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wqe->status);
}
}
@@ -476,7 +467,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
@@ -515,7 +506,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
@@ -541,7 +532,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
}
@@ -582,7 +573,7 @@ static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
err = rxe_cq_post(qp->scq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+ rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);
return err;
}
@@ -654,6 +645,8 @@ int rxe_completer(struct rxe_qp *qp)
int ret;
unsigned long flags;
+ qp->req.again = 0;
+
spin_lock_irqsave(&qp->state_lock, flags);
if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
qp_state(qp) == IB_QPS_RESET) {
@@ -737,7 +730,7 @@ int rxe_completer(struct rxe_qp *qp)
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
state = COMPST_DONE;
@@ -792,7 +785,7 @@ int rxe_completer(struct rxe_qp *qp)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_sched_task(&qp->req.task);
+ qp->req.again = 1;
}
goto done;
@@ -843,8 +836,9 @@ done:
ret = 0;
goto out;
exit:
- ret = -EAGAIN;
+ ret = (qp->req.again) ? 0 : -EAGAIN;
out:
+ qp->req.again = 0;
if (pkt)
free_pkt(pkt);
return ret;
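
The qp->req.again flag is how the completer now nudges the requester: instead of scheduling a separate request task, it records that more send-side work exists and returns 0 rather than -EAGAIN, so the shared task keeps looping. A minimal sketch of that contract, condensed from rxe_sender() as added later in this diff (rxe_req.c); not a verbatim hunk:

	/* the task framework re-invokes the work function until it
	 * returns nonzero, so returning 0 keeps the loop going
	 */
	int rxe_sender(struct rxe_qp *qp)
	{
		int req_ret = rxe_requester(qp);	/* send queue */
		int comp_ret = rxe_completer(qp);	/* acks; returns 0,
							 * not -EAGAIN, when
							 * req.again was set */

		return (req_ret && comp_ret) ? -EAGAIN : 0;
	}
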
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index d5486cbb3f..fec87c9030 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)",
+ rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
cqe, count);
goto err1;
}
@@ -96,7 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- rxe_err_cq(cq, "queue full");
+ rxe_err_cq(cq, "queue full\n");
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index a012522b57..437917a7d8 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -14,7 +14,7 @@ static const struct rdma_stat_desc rxe_counter_descs[] = {
[RXE_CNT_RCV_RNR].name = "rcvd_rnr_err",
[RXE_CNT_SND_RNR].name = "send_rnr_err",
[RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err",
- [RXE_CNT_COMPLETER_SCHED].name = "ack_deferred",
+ [RXE_CNT_SENDER_SCHED].name = "ack_deferred",
[RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err",
[RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err",
[RXE_CNT_COMP_RETRY].name = "completer_retry_err",
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 71f4d4fa9d..051f9e1c38 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -18,7 +18,7 @@ enum rxe_counters {
RXE_CNT_RCV_RNR,
RXE_CNT_SND_RNR,
RXE_CNT_RCV_SEQ_ERR,
- RXE_CNT_COMPLETER_SCHED,
+ RXE_CNT_SENDER_SCHED,
RXE_CNT_RETRY_EXCEEDED,
RXE_CNT_RNR_RETRY_EXCEEDED,
RXE_CNT_COMP_RETRY,
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4d2a8ef52c..ded4611915 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -59,7 +59,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);
@@ -164,7 +164,8 @@ void rxe_dealloc(struct ib_device *ib_dev);
int rxe_completer(struct rxe_qp *qp);
int rxe_requester(struct rxe_qp *qp);
-int rxe_responder(struct rxe_qp *qp);
+int rxe_sender(struct rxe_qp *qp);
+int rxe_receiver(struct rxe_qp *qp);
/* rxe_icrc.c */
int rxe_icrc_init(struct rxe_dev *rxe);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index f54042e9ae..da3dee5208 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -34,7 +34,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_MEM_REG:
if (iova < mr->ibmr.iova ||
iova + length > mr->ibmr.iova + mr->ibmr.length) {
- rxe_dbg_mr(mr, "iova/length out of range");
+ rxe_dbg_mr(mr, "iova/length out of range\n");
return -EINVAL;
}
return 0;
@@ -126,7 +126,7 @@ static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
return xas_error(&xas);
}
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr)
{
struct ib_umem *umem;
@@ -319,7 +319,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
err = mr_check_range(mr, iova, length);
if (unlikely(err)) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return err;
}
@@ -477,7 +477,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
u64 *va;
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state");
+ rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -490,7 +490,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
err = mr_check_range(mr, iova, sizeof(value));
if (err) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -501,7 +501,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
}
if (unlikely(page_offset & 0x7)) {
- rxe_dbg_mr(mr, "iova not aligned");
+ rxe_dbg_mr(mr, "iova not aligned\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
@@ -534,7 +534,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state");
+ rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -548,7 +548,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
err = mr_check_range(mr, iova, sizeof(value));
if (unlikely(err)) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -560,7 +560,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA A19.4.2 */
if (unlikely(page_offset & 0x7)) {
- rxe_dbg_mr(mr, "misaligned address");
+ rxe_dbg_mr(mr, "misaligned address\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index d9312b5c9d..379e65bfcd 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -198,7 +198,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
if (access & ~RXE_ACCESS_SUPPORTED_MW) {
- rxe_err_mw(mw, "access %#x not supported", access);
+ rxe_err_mw(mw, "access %#x not supported\n", access);
ret = -EOPNOTSUPP;
goto err_drop_mr;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index e5827064ab..ca9a82e1c4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -345,25 +345,44 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- struct rxe_qp *qp = sk->sk_user_data;
- int skb_out = atomic_dec_return(&qp->skb_out);
+ struct net_device *ndev = skb->dev;
+ struct rxe_dev *rxe;
+ unsigned int qp_index;
+ struct rxe_qp *qp;
+ int skb_out;
+
+ rxe = rxe_get_dev_from_net(ndev);
+ if (!rxe && is_vlan_dev(ndev))
+ rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
+ if (WARN_ON(!rxe))
+ return;
+
+ qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
+ if (!qp_index)
+ return;
+
+ qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
+ if (!qp)
+ goto put_dev;
- if (unlikely(qp->need_req_skb &&
- skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
- rxe_sched_task(&qp->req.task);
+ skb_out = atomic_dec_return(&qp->skb_out);
+ if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
+ rxe_sched_task(&qp->send_task);
rxe_put(qp);
+put_dev:
+ ib_device_put(&rxe->ib_dev);
+ sock_put(skb->sk);
}
static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
int err;
+ struct sock *sk = pkt->qp->sk->sk;
+ sock_hold(sk);
+ skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
- skb->sk = pkt->qp->sk->sk;
-
- rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@@ -371,12 +390,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
else
err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- if (unlikely(net_xmit_eval(err))) {
- rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
- return -EAGAIN;
- }
-
- return 0;
+ return err;
}
/* fix up a send packet to match the packets
@@ -384,8 +398,15 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
*/
static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
+ struct sock *sk = pkt->qp->sk->sk;
+
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ atomic_inc(&pkt->qp->skb_out);
+
if (skb->protocol == htons(ETH_P_IP))
skb_pull(skb, sizeof(struct iphdr));
else
@@ -432,12 +453,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
return err;
}
- if ((qp_type(qp) != IB_QPT_RC) &&
- (pkt->mask & RXE_END_MASK)) {
- pkt->wqe->state = wqe_state_done;
- rxe_sched_task(&qp->comp.task);
- }
-
rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
goto done;
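
sk_user_data now carries the QP's pool index rather than a raw pointer, so the skb destructor can run safely after the QP is gone: a stale index simply fails the lookup. A minimal sketch of the reference pairing this relies on, condensed from rxe_send()/rxe_skb_tx_dtor() above (assuming the usual rxe pool behavior that rxe_pool_get_index() returns a referenced object):

	/* tx path: pin the socket and count the skb against the QP */
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = rxe_skb_tx_dtor;
	atomic_inc(&qp->skb_out);

	/* destructor: resolve index -> qp; NULL means the QP is gone.
	 * (The full path also does ib_device_put() on the rxe device,
	 * as in the hunk above.)
	 */
	qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
	if (qp) {
		atomic_dec(&qp->skb_out);
		rxe_put(qp);		/* drop the lookup reference */
	}
	sock_put(skb->sk);
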
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6215c6de3a..67567d6219 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -119,7 +119,7 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
bool sleepable)
{
- int err;
+ int err = -EINVAL;
gfp_t gfp_flags;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
@@ -147,7 +147,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
err_cnt:
atomic_dec(&pool->num_elem);
- return -EINVAL;
+ return err;
}
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 28e379c108..d2f7b5195c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -201,7 +201,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->sq.queue) {
- rxe_err_qp(qp, "Unable to allocate send queue");
+ rxe_err_qp(qp, "Unable to allocate send queue\n");
err = -ENOMEM;
goto err_out;
}
@@ -211,7 +211,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue->buf, qp->sq.queue->buf_size,
&qp->sq.queue->ip);
if (err) {
- rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
- qp->sk->sk->sk_user_data = qp;
+ qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
/* pick a source UDP port number for this QP based on
* the source QPN. this spreads traffic for different QPs
@@ -265,8 +265,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->req.opcode = -1;
qp->comp.opcode = -1;
- rxe_init_task(&qp->req.task, qp, rxe_requester);
- rxe_init_task(&qp->comp.task, qp, rxe_completer);
+ rxe_init_task(&qp->send_task, qp, rxe_sender);
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
if (init->qp_type == IB_QPT_RC) {
@@ -292,7 +291,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->rq.queue) {
- rxe_err_qp(qp, "Unable to allocate recv queue");
+ rxe_err_qp(qp, "Unable to allocate recv queue\n");
err = -ENOMEM;
goto err_out;
}
@@ -302,7 +301,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
@@ -337,7 +336,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- rxe_init_task(&qp->resp.task, qp, rxe_responder);
+ rxe_init_task(&qp->recv_task, qp, rxe_receiver);
qp->resp.opcode = OPCODE_NONE;
qp->resp.msn = 0;
@@ -514,14 +513,12 @@ err1:
static void rxe_qp_reset(struct rxe_qp *qp)
{
/* stop tasks from running */
- rxe_disable_task(&qp->resp.task);
- rxe_disable_task(&qp->comp.task);
- rxe_disable_task(&qp->req.task);
+ rxe_disable_task(&qp->recv_task);
+ rxe_disable_task(&qp->send_task);
/* drain work and packet queues */
- rxe_requester(qp);
- rxe_completer(qp);
- rxe_responder(qp);
+ rxe_sender(qp);
+ rxe_receiver(qp);
if (qp->rq.queue)
rxe_queue_reset(qp->rq.queue);
@@ -548,9 +545,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
cleanup_rd_atomic_resources(qp);
/* reenable tasks */
- rxe_enable_task(&qp->resp.task);
- rxe_enable_task(&qp->comp.task);
- rxe_enable_task(&qp->req.task);
+ rxe_enable_task(&qp->recv_task);
+ rxe_enable_task(&qp->send_task);
}
/* move the qp to the error state */
@@ -562,9 +558,8 @@ void rxe_qp_error(struct rxe_qp *qp)
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
- rxe_sched_task(&qp->resp.task);
- rxe_sched_task(&qp->comp.task);
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->recv_task);
+ rxe_sched_task(&qp->send_task);
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -575,8 +570,7 @@ static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
spin_lock_irqsave(&qp->state_lock, flags);
qp->attr.sq_draining = 1;
- rxe_sched_task(&qp->comp.task);
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -821,19 +815,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
del_timer_sync(&qp->rnr_nak_timer);
}
- if (qp->resp.task.func)
- rxe_cleanup_task(&qp->resp.task);
+ if (qp->recv_task.func)
+ rxe_cleanup_task(&qp->recv_task);
- if (qp->req.task.func)
- rxe_cleanup_task(&qp->req.task);
-
- if (qp->comp.task.func)
- rxe_cleanup_task(&qp->comp.task);
+ if (qp->send_task.func)
+ rxe_cleanup_task(&qp->send_task);
/* flush out any receive wr's or pending requests */
- rxe_requester(qp);
- rxe_completer(qp);
- rxe_responder(qp);
+ rxe_sender(qp);
+ rxe_receiver(qp);
if (qp->sq.queue)
rxe_queue_cleanup(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d8c41fd626..479c07e6e4 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -108,7 +108,7 @@ void rnr_nak_timer(struct timer_list *t)
/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
}
spin_unlock_irqrestore(&qp->state_lock, flags);
}
@@ -424,7 +424,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int paylen;
int solicited;
u32 qp_num;
- int ack_req;
+ int ack_req = 0;
/* length from start of bth to end of icrc */
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
@@ -445,8 +445,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num;
- ack_req = ((pkt->mask & RXE_END_MASK) ||
- (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
+ ack_req = ((pkt->mask & RXE_END_MASK) ||
+ (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
if (ack_req)
qp->req.noack_pkts = 0;
@@ -545,6 +546,8 @@ static void update_wqe_state(struct rxe_qp *qp,
if (pkt->mask & RXE_END_MASK) {
if (qp_type(qp) == IB_QPT_RC)
wqe->state = wqe_state_pending;
+ else
+ wqe->state = wqe_state_done;
} else {
wqe->state = wqe_state_processing;
}
@@ -573,30 +576,6 @@ static void update_wqe_psn(struct rxe_qp *qp,
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
-static void save_state(struct rxe_send_wqe *wqe,
- struct rxe_qp *qp,
- struct rxe_send_wqe *rollback_wqe,
- u32 *rollback_psn)
-{
- rollback_wqe->state = wqe->state;
- rollback_wqe->first_psn = wqe->first_psn;
- rollback_wqe->last_psn = wqe->last_psn;
- rollback_wqe->dma = wqe->dma;
- *rollback_psn = qp->req.psn;
-}
-
-static void rollback_state(struct rxe_send_wqe *wqe,
- struct rxe_qp *qp,
- struct rxe_send_wqe *rollback_wqe,
- u32 rollback_psn)
-{
- wqe->state = rollback_wqe->state;
- wqe->first_psn = rollback_wqe->first_psn;
- wqe->last_psn = rollback_wqe->last_psn;
- wqe->dma = rollback_wqe->dma;
- qp->req.psn = rollback_psn;
-}
-
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
qp->req.opcode = pkt->opcode;
@@ -655,12 +634,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
- /* There is no ack coming for local work requests
- * which can lead to a deadlock. So go ahead and complete
- * it now.
- */
- rxe_sched_task(&qp->comp.task);
-
return 0;
}
@@ -676,8 +649,6 @@ int rxe_requester(struct rxe_qp *qp)
int opcode;
int err;
int ret;
- struct rxe_send_wqe rollback_wqe;
- u32 rollback_psn;
struct rxe_queue *q = qp->sq.queue;
struct rxe_ah *ah;
struct rxe_av *av;
@@ -786,7 +757,6 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- rxe_sched_task(&qp->comp.task);
goto done;
}
payload = mtu;
@@ -799,9 +769,6 @@ int rxe_requester(struct rxe_qp *qp)
pkt.mask = rxe_opcode[opcode].mask;
pkt.wqe = wqe;
- /* save wqe state before we build and send packet */
- save_state(wqe, qp, &rollback_wqe, &rollback_psn);
-
av = rxe_get_av(&pkt, &ah);
if (unlikely(!av)) {
rxe_dbg_qp(qp, "Failed no address vector\n");
@@ -834,31 +801,14 @@ int rxe_requester(struct rxe_qp *qp)
if (ah)
rxe_put(ah);
- /* update wqe state as though we had sent it */
- update_wqe_state(qp, wqe, &pkt);
- update_wqe_psn(qp, wqe, &pkt, payload);
-
err = rxe_xmit_packet(qp, &pkt, skb);
if (err) {
- if (err != -EAGAIN) {
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err;
- }
-
- /* the packet was dropped so reset wqe to the state
- * before we sent it so we can try to resend
- */
- rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
-
- /* force a delay until the dropped packet is freed and
- * the send queue is drained below the low water mark
- */
- qp->need_req_skb = 1;
-
- rxe_sched_task(&qp->req.task);
- goto exit;
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
}
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
update_state(qp, &pkt);
/* A non-zero return value will cause rxe_do_task to
@@ -878,3 +828,20 @@ exit:
out:
return ret;
}
+
+int rxe_sender(struct rxe_qp *qp)
+{
+ int req_ret;
+ int comp_ret;
+
+ /* process the send queue */
+ req_ret = rxe_requester(qp);
+
+ /* process the response queue */
+ comp_ret = rxe_completer(qp);
+
+ /* exit the task loop if both requester and completer
+ * are ready
+ */
+ return (req_ret && comp_ret) ? -EAGAIN : 0;
+}
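
rxe_sender() plugs into the existing task machinery; the only call-site change is which function the single send task runs. A minimal usage sketch, condensed from the rxe_qp.c hunks in this diff:

	/* at QP init (rxe_qp_init_req) */
	rxe_init_task(&qp->send_task, qp, rxe_sender);

	/* wherever send-side work appears (post_send, retransmit and
	 * rnr-nak timers, the tx skb destructor): runs rxe_sender(qp)
	 * repeatedly until it returns -EAGAIN
	 */
	rxe_sched_task(&qp->send_task);
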
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index da470a925e..6596a85723 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
- int must_sched;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
-
skb_queue_tail(&qp->req_pkts, skb);
-
- must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
- (skb_queue_len(&qp->req_pkts) > 1);
-
- if (must_sched)
- rxe_sched_task(&qp->resp.task);
- else
- rxe_run_task(&qp->resp.task);
+ rxe_sched_task(&qp->recv_task);
}
static inline enum resp_states get_req(struct rxe_qp *qp,
@@ -354,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
* receive buffer later. For rdma operations additional
* length checks are performed in check_rkey.
*/
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + 40 > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
(qp_type(qp) == IB_QPT_UC))) {
unsigned int mtu = qp->mtu;
@@ -362,18 +365,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
if ((pkt->mask & RXE_START_MASK) &&
(pkt->mask & RXE_END_MASK)) {
if (unlikely(payload > mtu)) {
- rxe_dbg_qp(qp, "only packet too long");
+ rxe_dbg_qp(qp, "only packet too long\n");
return RESPST_ERR_LENGTH;
}
} else if ((pkt->mask & RXE_START_MASK) ||
(pkt->mask & RXE_MIDDLE_MASK)) {
if (unlikely(payload != mtu)) {
- rxe_dbg_qp(qp, "first or middle packet not mtu");
+ rxe_dbg_qp(qp, "first or middle packet not mtu\n");
return RESPST_ERR_LENGTH;
}
} else if (pkt->mask & RXE_END_MASK) {
if (unlikely((payload == 0) || (payload > mtu))) {
- rxe_dbg_qp(qp, "last packet zero or too long");
+ rxe_dbg_qp(qp, "last packet zero or too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -382,7 +385,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/* See IBA C9-94 */
if (pkt->mask & RXE_RETH_MASK) {
if (reth_len(pkt) > (1U << 31)) {
- rxe_dbg_qp(qp, "dma length too long");
+ rxe_dbg_qp(qp, "dma length too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -1133,7 +1136,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wc->status);
}
@@ -1442,7 +1445,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
err = rxe_cq_post(qp->rcq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
+ rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
return err;
}
@@ -1485,7 +1488,7 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
qp->resp.wqe = NULL;
}
-int rxe_responder(struct rxe_qp *qp)
+int rxe_receiver(struct rxe_qp *qp)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;
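
The "payload + 40" in the new UD length check above reserves room for the 40-byte Global Routing Header that, per the InfiniBand spec, occupies the start of a UD receive buffer. A worked example with hypothetical sizes:

	payload          = 1024			/* bytes in the incoming packet */
	recv_buffer_len  =  512 + 512 = 1024	/* two posted 512-byte SGEs */

	payload + 40 = 1064 > 1024		/* -> RESPST_ERR_LENGTH */
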
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 1501120d4f..80332638d9 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -156,7 +156,7 @@ static void do_task(struct rxe_task *task)
default:
WARN_ON(1);
- rxe_dbg_qp(task->qp, "unexpected task state = %d",
+ rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
task->state);
task->state = TASK_STATE_IDLE;
}
@@ -167,7 +167,7 @@ exit:
if (WARN_ON(task->num_done != task->num_sched))
rxe_dbg_qp(
task->qp,
- "%ld tasks scheduled, %ld tasks done",
+ "%ld tasks scheduled, %ld tasks done\n",
task->num_sched, task->num_done);
}
spin_unlock_irqrestore(&task->lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0930350522..de6238ee43 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -23,7 +23,7 @@ static int rxe_query_device(struct ib_device *ibdev,
int err;
if (udata->inlen || udata->outlen) {
- rxe_dbg_dev(rxe, "malformed udata");
+ rxe_dbg_dev(rxe, "malformed udata\n");
err = -EINVAL;
goto err_out;
}
@@ -33,7 +33,7 @@ static int rxe_query_device(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -45,7 +45,7 @@ static int rxe_query_port(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@@ -67,7 +67,7 @@ static int rxe_query_port(struct ib_device *ibdev,
return ret;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -79,7 +79,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
if (index != 0) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad pkey index = %d", index);
+ rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
goto err_out;
}
@@ -87,7 +87,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -100,7 +100,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@@ -115,7 +115,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -128,14 +128,14 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
//TODO is shutdown useful
if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@@ -149,7 +149,7 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -161,14 +161,14 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
return IB_LINK_LAYER_ETHERNET;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -181,7 +181,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@@ -197,7 +197,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -210,7 +210,7 @@ static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->uc_pool, uc);
if (err)
- rxe_err_dev(rxe, "unable to create uc");
+ rxe_err_dev(rxe, "unable to create uc\n");
return err;
}
@@ -222,7 +222,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
err = rxe_cleanup(uc);
if (err)
- rxe_err_uc(uc, "cleanup failed, err = %d", err);
+ rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
}
/* pd */
@@ -234,14 +234,14 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->pd_pool, pd);
if (err) {
- rxe_dbg_dev(rxe, "unable to alloc pd");
+ rxe_dbg_dev(rxe, "unable to alloc pd\n");
goto err_out;
}
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -252,7 +252,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_cleanup(pd);
if (err)
- rxe_err_pd(pd, "cleanup failed, err = %d", err);
+ rxe_err_pd(pd, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -279,7 +279,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err) {
- rxe_dbg_dev(rxe, "unable to create ah");
+ rxe_dbg_dev(rxe, "unable to create ah\n");
goto err_out;
}
@@ -288,7 +288,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
if (err) {
- rxe_dbg_ah(ah, "bad attr");
+ rxe_dbg_ah(ah, "bad attr\n");
goto err_cleanup;
}
@@ -298,7 +298,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
sizeof(uresp->ah_num));
if (err) {
err = -EFAULT;
- rxe_dbg_ah(ah, "unable to copy to user");
+ rxe_dbg_ah(ah, "unable to copy to user\n");
goto err_cleanup;
}
} else if (ah->is_user) {
@@ -314,9 +314,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
err_cleanup:
cleanup_err = rxe_cleanup(ah);
if (cleanup_err)
- rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_ah(ah, "returned err = %d", err);
+ rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@@ -327,7 +327,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
err = rxe_ah_chk_attr(ah, attr);
if (err) {
- rxe_dbg_ah(ah, "bad attr");
+ rxe_dbg_ah(ah, "bad attr\n");
goto err_out;
}
@@ -336,7 +336,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
err_out:
- rxe_err_ah(ah, "returned err = %d", err);
+ rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@@ -358,7 +358,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
if (err)
- rxe_err_ah(ah, "cleanup failed, err = %d", err);
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -376,7 +376,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_err_dev(rxe, "malformed udata");
+ rxe_err_dev(rxe, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@@ -384,20 +384,20 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (init->srq_type != IB_SRQT_BASIC) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "srq type = %d, not supported",
+ rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
init->srq_type);
goto err_out;
}
err = rxe_srq_chk_init(rxe, init);
if (err) {
- rxe_dbg_dev(rxe, "invalid init attributes");
+ rxe_dbg_dev(rxe, "invalid init attributes\n");
goto err_out;
}
err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err) {
- rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
goto err_out;
}
@@ -406,7 +406,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err) {
- rxe_dbg_srq(srq, "create srq failed, err = %d", err);
+ rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
goto err_cleanup;
}
@@ -415,9 +415,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(srq);
if (cleanup_err)
- rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -433,34 +433,34 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (udata) {
if (udata->inlen < sizeof(cmd)) {
err = -EINVAL;
- rxe_dbg_srq(srq, "malformed udata");
+ rxe_dbg_srq(srq, "malformed udata\n");
goto err_out;
}
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
err = -EFAULT;
- rxe_dbg_srq(srq, "unable to read udata");
+ rxe_dbg_srq(srq, "unable to read udata\n");
goto err_out;
}
}
err = rxe_srq_chk_attr(rxe, srq, attr, mask);
if (err) {
- rxe_dbg_srq(srq, "bad init attributes");
+ rxe_dbg_srq(srq, "bad init attributes\n");
goto err_out;
}
err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
if (err) {
- rxe_dbg_srq(srq, "bad attr");
+ rxe_dbg_srq(srq, "bad attr\n");
goto err_out;
}
return 0;
err_out:
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@@ -471,7 +471,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
if (srq->error) {
err = -EINVAL;
- rxe_dbg_srq(srq, "srq in error state");
+ rxe_dbg_srq(srq, "srq in error state\n");
goto err_out;
}
@@ -481,7 +481,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
err_out:
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@@ -505,7 +505,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (err) {
*bad_wr = wr;
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
}
return err;
@@ -518,7 +518,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
err = rxe_cleanup(srq);
if (err)
- rxe_err_srq(srq, "cleanup failed, err = %d", err);
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -536,13 +536,13 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
@@ -554,25 +554,25 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (init->create_flags) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
+ rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
goto err_out;
}
err = rxe_qp_chk_init(rxe, init);
if (err) {
- rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
+ rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->qp_pool, qp);
if (err) {
- rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err) {
- rxe_dbg_qp(qp, "create qp failed, err = %d", err);
+ rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
goto err_cleanup;
}
@@ -582,9 +582,9 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(qp);
if (cleanup_err)
- rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -597,20 +597,20 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
err = -EOPNOTSUPP;
- rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
+ rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
mask, err);
goto err_out;
}
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err) {
- rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
+ rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_attr(qp, attr, mask, udata);
if (err) {
- rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
+ rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
goto err_out;
}
@@ -622,7 +622,7 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
err_out:
- rxe_err_qp(qp, "returned err = %d", err);
+ rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@@ -644,18 +644,18 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
err = rxe_qp_chk_destroy(qp);
if (err) {
- rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
+ rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
goto err_out;
}
err = rxe_cleanup(qp);
if (err)
- rxe_err_qp(qp, "cleanup failed, err = %d", err);
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", err);
return 0;
err_out:
- rxe_err_qp(qp, "returned err = %d", err);
+ rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@@ -675,12 +675,12 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
do {
mask = wr_opcode_mask(ibwr->opcode, qp);
if (!mask) {
- rxe_err_qp(qp, "bad wr opcode for qp type");
+ rxe_err_qp(qp, "bad wr opcode for qp type\n");
break;
}
if (num_sge > sq->max_sge) {
- rxe_err_qp(qp, "num_sge > max_sge");
+ rxe_err_qp(qp, "num_sge > max_sge\n");
break;
}
@@ -689,27 +689,27 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
length += ibwr->sg_list[i].length;
if (length > (1UL << 31)) {
- rxe_err_qp(qp, "message length too long");
+ rxe_err_qp(qp, "message length too long\n");
break;
}
if (mask & WR_ATOMIC_MASK) {
if (length != 8) {
- rxe_err_qp(qp, "atomic length != 8");
+ rxe_err_qp(qp, "atomic length != 8\n");
break;
}
if (atomic_wr(ibwr)->remote_addr & 0x7) {
- rxe_err_qp(qp, "misaligned atomic address");
+ rxe_err_qp(qp, "misaligned atomic address\n");
break;
}
}
if (ibwr->send_flags & IB_SEND_INLINE) {
if (!(mask & WR_INLINE_MASK)) {
- rxe_err_qp(qp, "opcode doesn't support inline data");
+ rxe_err_qp(qp, "opcode doesn't support inline data\n");
break;
}
if (length > sq->max_inline) {
- rxe_err_qp(qp, "inline length too big");
+ rxe_err_qp(qp, "inline length too big\n");
break;
}
}
@@ -747,7 +747,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_SEND:
break;
default:
- rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP",
+ rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
wr->opcode);
return -EINVAL;
}
@@ -795,7 +795,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_ATOMIC_WRITE:
break;
default:
- rxe_err_qp(qp, "unsupported wr opcode %d",
+ rxe_err_qp(qp, "unsupported wr opcode %d\n",
wr->opcode);
return -EINVAL;
}
@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
int i;
for (i = 0; i < ibwr->num_sge; i++, sge++) {
- memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
+ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
p += sge->length;
}
}
@@ -870,7 +870,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
- rxe_err_qp(qp, "send queue full");
+ rxe_err_qp(qp, "send queue full\n");
return -ENOMEM;
}
@@ -905,12 +905,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
/* kickoff processing of any posted wqes */
if (good)
- rxe_sched_task(&qp->req.task);
-
- spin_lock_irqsave(&qp->state_lock, flags);
- if (qp_state(qp) == IB_QPS_ERR)
- rxe_sched_task(&qp->comp.task);
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ rxe_sched_task(&qp->send_task);
return err;
}
@@ -926,21 +921,21 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- rxe_err_qp(qp, "qp has been destroyed");
+ rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
- rxe_err_qp(qp, "qp not ready to send");
+ rxe_err_qp(qp, "qp not ready to send\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
if (qp->is_user) {
/* Utilize process context to do protocol processing */
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
} else {
err = rxe_post_send_kernel(qp, wr, bad_wr);
if (err)
@@ -963,13 +958,13 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
err = -ENOMEM;
- rxe_dbg("queue full");
+ rxe_dbg("queue full\n");
goto err_out;
}
if (unlikely(num_sge > rq->max_sge)) {
err = -EINVAL;
- rxe_dbg("bad num_sge > max_sge");
+ rxe_dbg("bad num_sge > max_sge\n");
goto err_out;
}
@@ -980,7 +975,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
/* IBA max message size is 2^31 */
if (length >= (1UL<<31)) {
err = -EINVAL;
- rxe_dbg("message length too long");
+ rxe_dbg("message length too long\n");
goto err_out;
}
@@ -1000,7 +995,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
return 0;
err_out:
- rxe_dbg("returned err = %d", err);
+ rxe_dbg("returned err = %d\n", err);
return err;
}
@@ -1016,7 +1011,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- rxe_err_qp(qp, "qp has been destroyed");
+ rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
@@ -1024,14 +1019,14 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
- rxe_dbg_qp(qp, "qp not ready to post recv");
+ rxe_dbg_qp(qp, "qp not ready to post recv\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
if (unlikely(qp->srq)) {
*bad_wr = wr;
- rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead");
+ rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
return -EINVAL;
}
@@ -1050,7 +1045,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) == IB_QPS_ERR)
- rxe_sched_task(&qp->resp.task);
+ rxe_sched_task(&qp->recv_task);
spin_unlock_irqrestore(&qp->state_lock, flags);
return err;
@@ -1069,7 +1064,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
uresp = udata->outbuf;
@@ -1077,26 +1072,26 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (attr->flags) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
+ rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
goto err_out;
}
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err) {
- rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
+ rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->cq_pool, cq);
if (err) {
- rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
goto err_out;
}
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp);
if (err) {
- rxe_dbg_cq(cq, "create cq failed, err = %d", err);
+ rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1105,9 +1100,9 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_cleanup:
cleanup_err = rxe_cleanup(cq);
if (cleanup_err)
- rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -1121,7 +1116,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_cq(cq, "malformed udata");
+ rxe_dbg_cq(cq, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@@ -1129,20 +1124,20 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
if (err) {
- rxe_dbg_cq(cq, "bad attr, err = %d", err);
+ rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
goto err_out;
}
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err) {
- rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
+ rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
goto err_out;
}
return 0;
err_out:
- rxe_err_cq(cq, "returned err = %d", err);
+ rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@@ -1206,18 +1201,18 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
*/
if (atomic_read(&cq->num_wq)) {
err = -EINVAL;
- rxe_dbg_cq(cq, "still in use");
+ rxe_dbg_cq(cq, "still in use\n");
goto err_out;
}
err = rxe_cleanup(cq);
if (err)
- rxe_err_cq(cq, "cleanup failed, err = %d", err);
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
return 0;
err_out:
- rxe_err_cq(cq, "returned err = %d", err);
+ rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@@ -1235,7 +1230,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
- rxe_dbg_dev(rxe, "unable to create mr");
+ rxe_dbg_dev(rxe, "unable to create mr\n");
goto err_free;
}
@@ -1249,7 +1244,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1263,7 +1258,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
int err, cleanup_err;
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_pd(pd, "access = %#x not supported (%#x)", access,
+ rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
RXE_ACCESS_SUPPORTED_MR);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1274,7 +1269,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
- rxe_dbg_pd(pd, "unable to create mr");
+ rxe_dbg_pd(pd, "unable to create mr\n");
goto err_free;
}
@@ -1282,9 +1277,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device;
- err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+ err = rxe_mr_init_user(rxe, start, length, access, mr);
if (err) {
- rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1294,10 +1289,10 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1314,7 +1309,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
* rereg_pd and rereg_access
*/
if (flags & ~RXE_MR_REREG_SUPPORTED) {
- rxe_err_mr(mr, "flags = %#x not supported", flags);
+ rxe_err_mr(mr, "flags = %#x not supported\n", flags);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1326,7 +1321,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
if (flags & IB_MR_REREG_ACCESS) {
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_mr(mr, "access = %#x not supported", access);
+ rxe_err_mr(mr, "access = %#x not supported\n", access);
return ERR_PTR(-EOPNOTSUPP);
}
mr->access = access;
@@ -1345,7 +1340,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (mr_type != IB_MR_TYPE_MEM_REG) {
err = -EINVAL;
- rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
mr_type, err);
goto err_out;
}
@@ -1364,7 +1359,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err = rxe_mr_init_fast(max_num_sg, mr);
if (err) {
- rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1374,11 +1369,11 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", err);
err_free:
kfree(mr);
err_out:
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1390,19 +1385,19 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
/* See IBA 10.6.7.2.6 */
if (atomic_read(&mr->num_mw) > 0) {
err = -EINVAL;
- rxe_dbg_mr(mr, "mr has mw's bound");
+ rxe_dbg_mr(mr, "mr has mw's bound\n");
goto err_out;
}
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
kfree_rcu_mightsleep(mr);
return 0;
err_out:
- rxe_err_mr(mr, "returned err = %d", err);
+ rxe_err_mr(mr, "returned err = %d\n", err);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ccb9d19ffe..3c1354f822 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -113,7 +113,7 @@ struct rxe_req_info {
int need_retry;
int wait_for_rnr_timer;
int noack_pkts;
- struct rxe_task task;
+ int again;
};
struct rxe_comp_info {
@@ -124,7 +124,6 @@ struct rxe_comp_info {
int started_retry;
u32 retry_cnt;
u32 rnr_retry;
- struct rxe_task task;
};
enum rdatm_res_state {
@@ -196,7 +195,6 @@ struct rxe_resp_info {
unsigned int res_head;
unsigned int res_tail;
struct resp_res *res;
- struct rxe_task task;
};
struct rxe_qp {
@@ -229,6 +227,9 @@ struct rxe_qp {
struct sk_buff_head req_pkts;
struct sk_buff_head resp_pkts;
+ struct rxe_task send_task;
+ struct rxe_task recv_task;
+
struct rxe_req_info req;
struct rxe_comp_info comp;
struct rxe_resp_info resp;