author:    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-08-07 13:11:27 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>    2024-08-07 13:11:27 +0000
commit:    34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree:      62db60558cbf089714b48daeabca82bf2b20b20e /drivers/infiniband
parent:    Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband')
37 files changed, 735 insertions, 551 deletions
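A recurring pattern in the uverbs_ioctl.c hunks of the patch below is struct_group_tagged(): struct bundle_alloc_head gains a tagged header so that struct bundle_priv can embed only the fixed-size header (instead of the full flexible-array struct) and recover the full view with container_of() where needed. The following is a minimal userspace sketch of that idea, not the kernel code itself: the macro and container_of() are open-coded stand-ins for the helpers in include/linux/stddef.h, and the type names (alloc_head, priv) are illustrative only.

```c
/*
 * Simplified stand-in for the kernel's struct_group_tagged() and
 * container_of(); type names are illustrative, not the kernel's.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirror the members both as direct fields and as a tagged sub-struct. */
#define struct_group_tagged(TAG, NAME, ...) \
	union { \
		struct { __VA_ARGS__ }; \
		struct TAG { __VA_ARGS__ } NAME; \
	}

struct alloc_head {
	struct_group_tagged(alloc_head_hdr, hdr,
		struct alloc_head *next;
	);
	unsigned char data[];	/* flexible array must stay last */
};

struct priv {
	/* Only the fixed-size header, so members may follow it. */
	struct alloc_head_hdr alloc_head;
	int internal_used;
};

int main(void)
{
	struct priv p = { .internal_used = 42 };

	/* Recover the flexible-array view from the embedded header. */
	struct alloc_head *head =
		container_of(&p.alloc_head, struct alloc_head, hdr);

	head->next = NULL;	/* same storage as p.alloc_head */
	printf("hdr=%p full=%p used=%d\n",
	       (void *)&p.alloc_head, (void *)head, p.internal_used);
	return 0;
}
```

This mirrors why the patch can switch struct bundle_priv to a bundle_alloc_head_hdr member while the allocation code still traverses full struct bundle_alloc_head objects.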
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index f253295795..be0743dac3 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst, static bool has_gateway(const struct dst_entry *dst, sa_family_t family) { - struct rtable *rt; - struct rt6_info *rt6; - - if (family == AF_INET) { - rt = container_of(dst, struct rtable, dst); - return rt->rt_uses_gateway; - } + if (family == AF_INET) + return dst_rtable(dst)->rt_uses_gateway; - rt6 = container_of(dst, struct rt6_info, dst); - return rt6->rt6i_flags & RTF_GATEWAY; + return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY; } static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6de05ade2b..3d3ee3eca9 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2737,7 +2737,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { case IB_FLOW_SPEC_ETH: - ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_eth_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2748,7 +2748,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_IPV4: - ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_ipv4_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2759,7 +2759,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_IPV6: - ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_ipv6_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2775,7 +2775,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, break; case IB_FLOW_SPEC_TCP: case IB_FLOW_SPEC_UDP: - ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_tcp_udp_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2786,7 +2786,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_VXLAN_TUNNEL: - ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_tunnel_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2801,7 +2801,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, return -EINVAL; break; case IB_FLOW_SPEC_ESP: - ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_esp_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2812,7 +2812,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_GRE: - ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_gre_filter); actual_filter_sz = 
spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); @@ -2823,7 +2823,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz); break; case IB_FLOW_SPEC_MPLS: - ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz); + ib_filter_sz = sizeof(struct ib_flow_mpls_filter); actual_filter_sz = spec_filter_size(kern_spec_mask, kern_filter_sz, ib_filter_sz); diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index d9799706c5..f80da6a67e 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -36,13 +36,15 @@ #include "uverbs.h" struct bundle_alloc_head { - struct bundle_alloc_head *next; + struct_group_tagged(bundle_alloc_head_hdr, hdr, + struct bundle_alloc_head *next; + ); u8 data[]; }; struct bundle_priv { /* Must be first */ - struct bundle_alloc_head alloc_head; + struct bundle_alloc_head_hdr alloc_head; struct bundle_alloc_head *allocated_mem; size_t internal_avail; size_t internal_used; @@ -64,7 +66,7 @@ struct bundle_priv { * Must be last. bundle ends in a flex array which overlaps * internal_buffer. */ - struct uverbs_attr_bundle bundle; + struct uverbs_attr_bundle_hdr bundle; u64 internal_buffer[32]; }; @@ -77,9 +79,10 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm, unsigned int num_attrs) { struct bundle_priv *pbundle; + struct uverbs_attr_bundle *bundle; size_t bundle_size = offsetof(struct bundle_priv, internal_buffer) + - sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len + + sizeof(*bundle->attrs) * method_elm->key_bitmap_len + sizeof(*pbundle->uattrs) * num_attrs; method_elm->use_stack = bundle_size <= sizeof(*pbundle); @@ -107,7 +110,7 @@ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size, gfp_t flags) { struct bundle_priv *pbundle = - container_of(bundle, struct bundle_priv, bundle); + container_of(&bundle->hdr, struct bundle_priv, bundle); size_t new_used; void *res; @@ -149,7 +152,7 @@ static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, const struct uverbs_attr *attr) { struct bundle_priv *pbundle = - container_of(bundle, struct bundle_priv, bundle); + container_of(&bundle->hdr, struct bundle_priv, bundle); u16 flags; flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | @@ -166,6 +169,8 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle, struct ib_uverbs_attr *uattr, u32 attr_bkey) { + struct uverbs_attr_bundle *bundle = + container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); const struct uverbs_attr_spec *spec = &attr_uapi->spec; size_t array_len; u32 *idr_vals; @@ -184,7 +189,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle, return -EINVAL; attr->uobjects = - uverbs_alloc(&pbundle->bundle, + uverbs_alloc(bundle, array_size(array_len, sizeof(*attr->uobjects))); if (IS_ERR(attr->uobjects)) return PTR_ERR(attr->uobjects); @@ -209,7 +214,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle, for (i = 0; i != array_len; i++) { attr->uobjects[i] = uverbs_get_uobject_from_file( spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access, - idr_vals[i], &pbundle->bundle); + idr_vals[i], bundle); if (IS_ERR(attr->uobjects[i])) { ret = PTR_ERR(attr->uobjects[i]); break; @@ -240,7 +245,9 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, struct ib_uverbs_attr *uattr, u32 attr_bkey) { const struct uverbs_attr_spec *spec = &attr_uapi->spec; - struct 
uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey]; + struct uverbs_attr_bundle *bundle = + container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); + struct uverbs_attr *e = &bundle->attrs[attr_bkey]; const struct uverbs_attr_spec *val_spec = spec; struct uverbs_obj_attr *o_attr; @@ -288,7 +295,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) { void *p; - p = uverbs_alloc(&pbundle->bundle, uattr->len); + p = uverbs_alloc(bundle, uattr->len); if (IS_ERR(p)) return PTR_ERR(p); @@ -321,7 +328,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, */ o_attr->uobject = uverbs_get_uobject_from_file( spec->u.obj.obj_type, spec->u.obj.access, - uattr->data_s64, &pbundle->bundle); + uattr->data_s64, bundle); if (IS_ERR(o_attr->uobject)) return PTR_ERR(o_attr->uobject); __set_bit(attr_bkey, pbundle->uobj_finalize); @@ -422,6 +429,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, unsigned int num_attrs) { int (*handler)(struct uverbs_attr_bundle *attrs); + struct uverbs_attr_bundle *bundle = + container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs); unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey; unsigned int i; @@ -434,7 +443,7 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, if (!handler) return -EIO; - pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size); + pbundle->uattrs = uverbs_alloc(bundle, uattrs_size); if (IS_ERR(pbundle->uattrs)) return PTR_ERR(pbundle->uattrs); if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size)) @@ -453,25 +462,23 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, return -EINVAL; if (pbundle->method_elm->has_udata) - uverbs_fill_udata(&pbundle->bundle, - &pbundle->bundle.driver_udata, + uverbs_fill_udata(bundle, &pbundle->bundle.driver_udata, UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT); else pbundle->bundle.driver_udata = (struct ib_udata){}; if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) { - struct uverbs_obj_attr *destroy_attr = - &pbundle->bundle.attrs[destroy_bkey].obj_attr; + struct uverbs_obj_attr *destroy_attr = &bundle->attrs[destroy_bkey].obj_attr; - ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle); + ret = uobj_destroy(destroy_attr->uobject, bundle); if (ret) return ret; __clear_bit(destroy_bkey, pbundle->uobj_finalize); - ret = handler(&pbundle->bundle); + ret = handler(bundle); uobj_put_destroy(destroy_attr->uobject); } else { - ret = handler(&pbundle->bundle); + ret = handler(bundle); } /* @@ -481,10 +488,10 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, */ if (!ret && pbundle->method_elm->has_udata) { const struct uverbs_attr *attr = - uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); + uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT); if (!IS_ERR(attr)) - ret = uverbs_set_output(&pbundle->bundle, attr); + ret = uverbs_set_output(bundle, attr); } /* @@ -501,6 +508,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, static void bundle_destroy(struct bundle_priv *pbundle, bool commit) { unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len; + struct uverbs_attr_bundle *bundle = + container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); struct bundle_alloc_head *memblock; unsigned int i; @@ -508,20 +517,19 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit) i = -1; while ((i = find_next_bit(pbundle->uobj_finalize, 
key_bitmap_len, i + 1)) < key_bitmap_len) { - struct uverbs_attr *attr = &pbundle->bundle.attrs[i]; + struct uverbs_attr *attr = &bundle->attrs[i]; uverbs_finalize_object( attr->obj_attr.uobject, attr->obj_attr.attr_elm->spec.u.obj.access, test_bit(i, pbundle->uobj_hw_obj_valid), - commit, - &pbundle->bundle); + commit, bundle); } i = -1; while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len, i + 1)) < key_bitmap_len) { - struct uverbs_attr *attr = &pbundle->bundle.attrs[i]; + struct uverbs_attr *attr = &bundle->attrs[i]; const struct uverbs_api_attr *attr_uapi; void __rcu **slot; @@ -535,7 +543,7 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit) if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) { uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr, - commit, &pbundle->bundle); + commit, bundle); } } @@ -578,7 +586,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile, method_elm->bundle_size - offsetof(struct bundle_priv, internal_buffer); pbundle->alloc_head.next = NULL; - pbundle->allocated_mem = &pbundle->alloc_head; + pbundle->allocated_mem = container_of(&pbundle->alloc_head, + struct bundle_alloc_head, hdr); } else { pbundle = &onstack; pbundle->internal_avail = sizeof(pbundle->internal_buffer); @@ -596,8 +605,9 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile, pbundle->user_attrs = user_attrs; pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len * - sizeof(*pbundle->bundle.attrs), - sizeof(*pbundle->internal_buffer)); + sizeof(*container_of(&pbundle->bundle, + struct uverbs_attr_bundle, hdr)->attrs), + sizeof(*pbundle->internal_buffer)); memset(pbundle->bundle.attr_present, 0, sizeof(pbundle->bundle.attr_present)); memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize)); @@ -700,11 +710,13 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, unsigned int attr_out) { struct bundle_priv *pbundle = - container_of(bundle, struct bundle_priv, bundle); + container_of(&bundle->hdr, struct bundle_priv, bundle); + struct uverbs_attr_bundle *bundle_aux = + container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); const struct uverbs_attr *in = - uverbs_attr_get(&pbundle->bundle, attr_in); + uverbs_attr_get(bundle_aux, attr_in); const struct uverbs_attr *out = - uverbs_attr_get(&pbundle->bundle, attr_out); + uverbs_attr_get(bundle_aux, attr_out); if (!IS_ERR(in)) { udata->inlen = in->ptr_attr.len; @@ -829,7 +841,7 @@ void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle, u16 idx) { struct bundle_priv *pbundle = - container_of(bundle, struct bundle_priv, bundle); + container_of(&bundle->hdr, struct bundle_priv, bundle); __set_bit(uapi_bkey_attr(uapi_key_attr(idx)), pbundle->uobj_hw_obj_valid); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 9dca451ed5..6974922e56 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -107,8 +107,6 @@ struct bnxt_re_gsi_context { struct bnxt_re_sqp_entries *sqp_tbl; }; -#define BNXT_RE_MIN_MSIX 2 -#define BNXT_RE_MAX_MSIX 9 #define BNXT_RE_AEQ_IDX 0 #define BNXT_RE_NQ_IDX 1 #define BNXT_RE_GEN_P5_MAX_VF 64 @@ -168,7 +166,7 @@ struct bnxt_re_dev { struct bnxt_qplib_rcfw rcfw; /* NQ */ - struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; + struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX]; /* Device Resources */ struct bnxt_qplib_dev_attr dev_attr; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 
50cb2259bf..fb8a0c2488 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -930,8 +930,6 @@ void c4iw_id_table_free(struct c4iw_id_table *alloc); typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb); -int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, - struct l2t_entry *l2t); void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid, struct c4iw_dev_ucontext *uctx); u32 c4iw_get_resource(struct c4iw_id_table *id_table); diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index e2bdec32ae..926f9ff1f6 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -57,6 +57,7 @@ struct efa_dev { u64 db_bar_addr; u64 db_bar_len; + unsigned int num_irq_vectors; int admin_msix_vector_idx; struct efa_irq admin_irq; diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 7b1910a862..5fa3603c80 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -322,7 +322,9 @@ static int efa_create_eqs(struct efa_dev *dev) int err; int i; - neqs = min_t(unsigned int, neqs, num_online_cpus()); + neqs = min_t(unsigned int, neqs, + dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE); + dev->neqs = neqs; dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL); if (!dev->eqs) @@ -468,34 +470,30 @@ static void efa_disable_msix(struct efa_dev *dev) static int efa_enable_msix(struct efa_dev *dev) { - int msix_vecs, irq_num; + int max_vecs, num_vecs; /* * Reserve the max msix vectors we might need, one vector is reserved * for admin. */ - msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev), - num_online_cpus() + 1); + max_vecs = min_t(int, pci_msix_vec_count(dev->pdev), + num_online_cpus() + 1); dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n", - msix_vecs); + max_vecs); dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX; - irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs, - msix_vecs, PCI_IRQ_MSIX); + num_vecs = pci_alloc_irq_vectors(dev->pdev, 1, + max_vecs, PCI_IRQ_MSIX); - if (irq_num < 0) { - dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n", - irq_num); + if (num_vecs < 0) { + dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n", + num_vecs); return -ENOSPC; } - if (irq_num != msix_vecs) { - efa_disable_msix(dev); - dev_err(&dev->pdev->dev, - "Allocated %d MSI-X (out of %d requested)\n", - irq_num, msix_vecs); - return -ENOSPC; - } + dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs); + + dev->num_irq_vectors = num_vecs; return 0; } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 18b05ffb41..c465966a1d 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -315,7 +315,7 @@ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) * This routine returns the receive context associated * with a a qp's qpn. * - * Returns the context. + * Return: the context. */ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi, struct rvt_qp *qp) @@ -710,7 +710,7 @@ void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp) * The exp_lock must be held. 
* * Return: - * On success: a value postive value between 0 and RXE_NUM_TID_FLOWS - 1 + * On success: a value positive value between 0 and RXE_NUM_TID_FLOWS - 1 * On failure: -EAGAIN */ static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last) @@ -1007,7 +1007,7 @@ static u32 tid_flush_pages(struct tid_rdma_pageset *list, * pages are tested two at a time, i, i + 1 for contiguous * pages and i - 1 and i contiguous pages. * - * If any condition is false, any accumlated pages are flushed and + * If any condition is false, any accumulated pages are flushed and * v0,v1 are emitted as separate PAGE_SIZE pagesets * * Otherwise, the current 8k is totaled for a future flush. @@ -1434,7 +1434,7 @@ static void kern_program_rcvarray(struct tid_rdma_flow *flow) * (5) computes a tidarray with formatted TID entries which can be sent * to the sender * (6) Reserves and programs HW flows. - * (7) It also manages queing the QP when TID/flow resources are not + * (7) It also manages queueing the QP when TID/flow resources are not * available. * * @req points to struct tid_rdma_request of which the segments are a part. The @@ -1604,7 +1604,7 @@ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req) } /** - * hfi1_kern_exp_rcv_free_flows - free priviously allocated flow information + * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information * @req: the tid rdma request to be cleaned */ static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req) @@ -2055,7 +2055,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet, * req->clear_tail is advanced). However, when an earlier * request is received, this request will not be complete any * more (qp->s_tail_ack_queue is moved back, see below). - * Consequently, we need to update the TID flow info everytime + * Consequently, we need to update the TID flow info every time * a duplicate request is received. */ bth0 = be32_to_cpu(ohdr->bth[0]); @@ -2219,7 +2219,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet) /* * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ * (see hfi1_rc_rcv()) - * 2. Put TID RDMA READ REQ into the response queueu (s_ack_queue) + * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue) * - Setup struct tid_rdma_req with request info * - Initialize struct tid_rdma_flow info; * - Copy TID entries; @@ -2439,7 +2439,7 @@ find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode) void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet) { - /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side */ + /* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */ /* * 1. Find matching SWQE @@ -3649,7 +3649,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet) * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST * (see hfi1_rc_rcv()) * - Don't allow 0-length requests. - * 2. Put TID RDMA WRITE REQ into the response queueu (s_ack_queue) + * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue) * - Setup struct tid_rdma_req with request info * - Prepare struct tid_rdma_flow array? * 3. Set the qp->s_ack_state as state diagram in design doc. @@ -4026,7 +4026,7 @@ unlock_r_lock: void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet) { - /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side */ + /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */ /* * 1. 
Find matching SWQE @@ -5440,8 +5440,9 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) * the two state machines can step on each other with respect to the * RVT_S_BUSY flag. * Therefore, a modified test is used. - * @return true if the second leg is scheduled; - * false if the second leg is not scheduled. + * + * Return: %true if the second leg is scheduled; + * %false if the second leg is not scheduled. */ bool hfi1_schedule_tid_send(struct rvt_qp *qp) { diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h index 052a3d6090..11dbbabebd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.h +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h @@ -108,6 +108,9 @@ enum { HNS_ROCE_CMD_QUERY_CEQC = 0x92, HNS_ROCE_CMD_DESTROY_CEQC = 0x93, + /* SCC CTX commands */ + HNS_ROCE_CMD_QUERY_SCCC = 0xa2, + /* SCC CTX BT commands */ HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4, HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5, diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 2517c972c6..68e22f368d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct ib_device *ibdev = &hr_dev->ib_dev; u64 mtts[MTT_MIN_COUNT] = {}; - dma_addr_t dma_handle; int ret; - ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts), - &dma_handle); - if (!ret) { + ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts)); + if (ret) { ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret); - return -EINVAL; + return ret; } /* Get CQC memory HEM(Hardware Entry Memory) table */ @@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) goto err_put; } - ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle); + ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, + hns_roce_get_mtr_ba(&hr_cq->mtr)); if (ret) goto err_xa; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 46f8a63109..0b47c6d688 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -100,6 +100,9 @@ #define CQ_BANKID_SHIFT 2 #define CQ_BANKID_MASK GENMASK(1, 0) +#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF +#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF + enum { SERV_TYPE_RC, SERV_TYPE_UC, @@ -179,6 +182,7 @@ enum { #define HNS_ROCE_CMD_SUCCESS 1 +#define HNS_ROCE_MAX_HOP_NUM 3 /* The minimum page size is 4K for hardware */ #define HNS_HW_PAGE_SHIFT 12 #define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT) @@ -269,6 +273,11 @@ struct hns_roce_hem_list { dma_addr_t root_ba; /* pointer to the root ba table */ }; +enum mtr_type { + MTR_DEFAULT = 0, + MTR_PBL, +}; + struct hns_roce_buf_attr { struct { size_t size; /* region size */ @@ -277,7 +286,10 @@ struct hns_roce_buf_attr { unsigned int region_count; /* valid region count */ unsigned int page_shift; /* buffer page shift */ unsigned int user_access; /* umem access flag */ + u64 iova; + enum mtr_type type; bool mtt_only; /* only alloc buffer-required MTT memory */ + bool adaptive; /* adaptive for page_shift and hopnum */ }; struct hns_roce_hem_cfg { @@ -836,7 +848,8 @@ struct hns_roce_caps { u16 default_aeq_period; u16 default_aeq_arm_st; u16 default_ceq_arm_st; - enum hns_roce_cong_type cong_type; + u8 cong_cap; + enum hns_roce_cong_type default_cong_type; }; enum hns_roce_device_state 
{ @@ -937,6 +950,7 @@ struct hns_roce_hw { int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer); int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer); int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer); + int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer); int (*query_hw_counter)(struct hns_roce_dev *hr_dev, u64 *stats, u32 port, int *hw_counters); const struct ib_device_ops *hns_roce_dev_ops; @@ -1153,8 +1167,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 +static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr) +{ + return mtr->hem_cfg.root_ba; +} + int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index c4ac06a338..658c522be7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -249,85 +249,48 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, } static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, - int npages, unsigned long hem_alloc_size, gfp_t gfp_mask) { - struct hns_roce_hem_chunk *chunk = NULL; struct hns_roce_hem *hem; - struct scatterlist *mem; int order; void *buf; WARN_ON(gfp_mask & __GFP_HIGHMEM); + order = get_order(hem_alloc_size); + if (PAGE_SIZE << order != hem_alloc_size) { + dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n", + hem_alloc_size); + return NULL; + } + hem = kmalloc(sizeof(*hem), gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!hem) return NULL; - INIT_LIST_HEAD(&hem->chunk_list); - - order = get_order(hem_alloc_size); + buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size, + &hem->dma, gfp_mask); + if (!buf) + goto fail; - while (npages > 0) { - if (!chunk) { - chunk = kmalloc(sizeof(*chunk), - gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); - if (!chunk) - goto fail; - - sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN); - chunk->npages = 0; - chunk->nsg = 0; - memset(chunk->buf, 0, sizeof(chunk->buf)); - list_add_tail(&chunk->list, &hem->chunk_list); - } - - while (1 << order > npages) - --order; - - /* - * Alloc memory one time. If failed, don't alloc small block - * memory, directly return fail. 
- */ - mem = &chunk->mem[chunk->npages]; - buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order, - &sg_dma_address(mem), gfp_mask); - if (!buf) - goto fail; - - chunk->buf[chunk->npages] = buf; - sg_dma_len(mem) = PAGE_SIZE << order; - - ++chunk->npages; - ++chunk->nsg; - npages -= 1 << order; - } + hem->buf = buf; + hem->size = hem_alloc_size; return hem; fail: - hns_roce_free_hem(hr_dev, hem); + kfree(hem); return NULL; } void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem) { - struct hns_roce_hem_chunk *chunk, *tmp; - int i; - if (!hem) return; - list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) { - for (i = 0; i < chunk->npages; ++i) - dma_free_coherent(hr_dev->dev, - sg_dma_len(&chunk->mem[i]), - chunk->buf[i], - sg_dma_address(&chunk->mem[i])); - kfree(chunk); - } + dma_free_coherent(hr_dev->dev, hem->size, hem->buf, hem->dma); kfree(hem); } @@ -415,7 +378,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev, { u32 bt_size = mhop->bt_chunk_size; struct device *dev = hr_dev->dev; - struct hns_roce_hem_iter iter; gfp_t flag; u64 bt_ba; u32 size; @@ -456,16 +418,15 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev, */ size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size; flag = GFP_KERNEL | __GFP_NOWARN; - table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT, - size, flag); + table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag); if (!table->hem[index->buf]) { ret = -ENOMEM; goto err_alloc_hem; } index->inited |= HEM_INDEX_BUF; - hns_roce_hem_first(table->hem[index->buf], &iter); - bt_ba = hns_roce_hem_addr(&iter); + bt_ba = table->hem[index->buf]->dma; + if (table->type < HEM_TYPE_MTT) { if (mhop->hop_num == 2) *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba; @@ -586,7 +547,6 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev, } table->hem[i] = hns_roce_alloc_hem(hr_dev, - table->table_chunk_size >> PAGE_SHIFT, table->table_chunk_size, GFP_KERNEL | __GFP_NOWARN); if (!table->hem[i]) { @@ -725,7 +685,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj, dma_addr_t *dma_handle) { - struct hns_roce_hem_chunk *chunk; struct hns_roce_hem_mhop mhop; struct hns_roce_hem *hem; unsigned long mhop_obj = obj; @@ -734,7 +693,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, int offset, dma_offset; void *addr = NULL; u32 hem_idx = 0; - int length; int i, j; mutex_lock(&table->mutex); @@ -767,23 +725,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, if (!hem) goto out; - list_for_each_entry(chunk, &hem->chunk_list, list) { - for (i = 0; i < chunk->npages; ++i) { - length = sg_dma_len(&chunk->mem[i]); - if (dma_handle && dma_offset >= 0) { - if (length > (u32)dma_offset) - *dma_handle = sg_dma_address( - &chunk->mem[i]) + dma_offset; - dma_offset -= length; - } - - if (length > (u32)offset) { - addr = chunk->buf[i] + offset; - goto out; - } - offset -= length; - } - } + *dma_handle = hem->dma + dma_offset; + addr = hem->buf + offset; out: mutex_unlock(&table->mutex); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index fea6d7d508..9c415b2541 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -56,10 +56,6 @@ enum { HEM_TYPE_TRRL, }; -#define HNS_ROCE_HEM_CHUNK_LEN \ - ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \ - (sizeof(struct scatterlist) + sizeof(void *))) - #define check_whether_bt_num_3(type, hop_num) \ 
((type) < HEM_TYPE_MTT && (hop_num) == 2) @@ -72,25 +68,13 @@ enum { ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \ ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0)) -struct hns_roce_hem_chunk { - struct list_head list; - int npages; - int nsg; - struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN]; - void *buf[HNS_ROCE_HEM_CHUNK_LEN]; -}; - struct hns_roce_hem { - struct list_head chunk_list; + void *buf; + dma_addr_t dma; + unsigned long size; refcount_t refcount; }; -struct hns_roce_hem_iter { - struct hns_roce_hem *hem; - struct hns_roce_hem_chunk *chunk; - int page_idx; -}; - struct hns_roce_hem_mhop { u32 hop_num; u32 buf_chunk_size; @@ -133,38 +117,4 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, int offset, int *mtt_cnt); -static inline void hns_roce_hem_first(struct hns_roce_hem *hem, - struct hns_roce_hem_iter *iter) -{ - iter->hem = hem; - iter->chunk = list_empty(&hem->chunk_list) ? NULL : - list_entry(hem->chunk_list.next, - struct hns_roce_hem_chunk, list); - iter->page_idx = 0; -} - -static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter) -{ - return !iter->chunk; -} - -static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter) -{ - if (++iter->page_idx >= iter->chunk->nsg) { - if (iter->chunk->list.next == &iter->hem->chunk_list) { - iter->chunk = NULL; - return; - } - - iter->chunk = list_entry(iter->chunk->list.next, - struct hns_roce_hem_chunk, list); - iter->page_idx = 0; - } -} - -static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter) -{ - return sg_dma_address(&iter->chunk->mem[iter->page_idx]); -} - #endif /* _HNS_ROCE_HEM_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index f95ec4618f..8800464c9a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -2209,11 +2209,12 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev) caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS); - caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE); + caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP); caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); + caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG); caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); @@ -3195,21 +3196,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 }; struct ib_device *ibdev = &hr_dev->ib_dev; dma_addr_t pbl_ba; - int i, count; + int ret; + int i; - count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - min_t(int, ARRAY_SIZE(pages), mr->npages), - &pbl_ba); - if (count < 1) { - ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", - count); - return -ENOBUFS; + ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, + min_t(int, ARRAY_SIZE(pages), mr->npages)); + if (ret) { + ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret); + return ret; } /* Aligned to the hardware address access unit */ - for (i = 0; i < count; i++) + for (i = 0; i < ARRAY_SIZE(pages); i++) pages[i] >>= 6; + pbl_ba = 
hns_roce_get_mtr_ba(&mr->pbl_mtr); + mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); @@ -3308,18 +3310,12 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, struct hns_roce_mr *mr) { - struct ib_device *ibdev = &hr_dev->ib_dev; + dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); struct hns_roce_v2_mpt_entry *mpt_entry; - dma_addr_t pbl_ba = 0; mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); - if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) { - ibdev_err(ibdev, "failed to find frmr mtr.\n"); - return -ENOBUFS; - } - hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); hr_reg_write(mpt_entry, MPT_PD, mr->pd); @@ -4064,7 +4060,6 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, u32 step_idx) { - struct hns_roce_hem_iter iter; struct hns_roce_hem_mhop mhop; struct hns_roce_hem *hem; unsigned long mhop_obj = obj; @@ -4101,12 +4096,8 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, if (check_whether_last_step(hop_num, step_idx)) { hem = table->hem[hem_idx]; - for (hns_roce_hem_first(hem, &iter); - !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { - bt_ba = hns_roce_hem_addr(&iter); - ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, - step_idx); - } + + ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx); } else { if (step_idx == 0) bt_ba = table->bt_l0_dma_addr[i]; @@ -4347,17 +4338,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, { u64 mtts[MTT_MIN_COUNT] = { 0 }; u64 wqe_sge_ba; - int count; + int ret; /* Search qp buf's mtts */ - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, - MTT_MIN_COUNT, &wqe_sge_ba); - if (hr_qp->rq.wqe_cnt && count < 1) { + ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, + MTT_MIN_COUNT); + if (hr_qp->rq.wqe_cnt && ret) { ibdev_err(&hr_dev->ib_dev, - "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn); - return -EINVAL; + "failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n", + hr_qp->qpn, ret); + return ret; } + wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr); + context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; @@ -4419,23 +4413,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, struct ib_device *ibdev = &hr_dev->ib_dev; u64 sge_cur_blk = 0; u64 sq_cur_blk = 0; - int count; + int ret; /* search qp buf's mtts */ - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); - if (count < 1) { - ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n", - hr_qp->qpn); - return -EINVAL; + ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset, + &sq_cur_blk, 1); + if (ret) { + ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n", + hr_qp->qpn, ret); + return ret; } if (hr_qp->sge.sge_cnt > 0) { - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, - hr_qp->sge.offset, - &sge_cur_blk, 1, NULL); - if (count < 1) { - ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n", - hr_qp->qpn); - return -EINVAL; + ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, + hr_qp->sge.offset, &sge_cur_blk, 1); + if (ret) { + ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n", + hr_qp->qpn, ret); + return ret; } } @@ -4745,14 +4739,8 @@ enum { static int check_cong_type(struct ib_qp *ibqp, struct 
hns_roce_congestion_algorithm *cong_alg) { - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI) - hr_qp->cong_type = CONG_TYPE_DCQCN; - else - hr_qp->cong_type = hr_dev->caps.cong_type; - /* different congestion types match different configurations */ switch (hr_qp->cong_type) { case CONG_TYPE_DCQCN: @@ -4780,9 +4768,6 @@ static int check_cong_type(struct ib_qp *ibqp, cong_alg->wnd_mode_sel = WND_LIMIT; break; default: - ibdev_warn(&hr_dev->ib_dev, - "invalid type(%u) for congestion selection.\n", - hr_qp->cong_type); hr_qp->cong_type = CONG_TYPE_DCQCN; cong_alg->alg_sel = CONG_DCQCN; cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; @@ -5333,6 +5318,30 @@ out: return ret; } +static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn, + void *buffer) +{ + struct hns_roce_v2_scc_context *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC, + qpn); + if (ret) + goto out; + + context = mailbox->buf; + memcpy(buffer, context, sizeof(*context)); + +out: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev, struct hns_roce_v2_qp_context *context) { @@ -5586,18 +5595,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, struct ib_device *ibdev = srq->ibsrq.device; struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); u64 mtts_idx[MTT_MIN_COUNT] = {}; - dma_addr_t dma_handle_idx = 0; + dma_addr_t dma_handle_idx; int ret; /* Get physical address of idx que buf */ ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx, - ARRAY_SIZE(mtts_idx), &dma_handle_idx); - if (ret < 1) { + ARRAY_SIZE(mtts_idx)); + if (ret) { ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n", ret); - return -ENOBUFS; + return ret; } + dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr); + hr_reg_write(ctx, SRQC_IDX_HOP_NUM, to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt)); @@ -5629,20 +5640,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); struct hns_roce_srq_context *ctx = mb_buf; u64 mtts_wqe[MTT_MIN_COUNT] = {}; - dma_addr_t dma_handle_wqe = 0; + dma_addr_t dma_handle_wqe; int ret; memset(ctx, 0, sizeof(*ctx)); /* Get the physical address of srq buf */ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe, - ARRAY_SIZE(mtts_wqe), &dma_handle_wqe); - if (ret < 1) { + ARRAY_SIZE(mtts_wqe)); + if (ret) { ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n", ret); - return -ENOBUFS; + return ret; } + dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr); + hr_reg_write(ctx, SRQC_SRQ_ST, 1); hr_reg_write_bool(ctx, SRQC_SRQ_TYPE, srq->ibsrq.srq_type == IB_SRQT_XRC); @@ -5790,7 +5803,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) dev_info(hr_dev->dev, "cq_period(%u) reached the upper limit, adjusted to 65.\n", cq_period); - cq_period = HNS_ROCE_MAX_CQ_PERIOD; + cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08; } cq_period *= HNS_ROCE_CLOCK_ADJUST; } @@ -6358,7 +6371,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, u64 eqe_ba[MTT_MIN_COUNT] = { 0 }; struct hns_roce_eq_context *eqc; u64 bt_ba = 0; - int count; + int ret; eqc = mb_buf; memset(eqc, 0, sizeof(struct 
hns_roce_eq_context)); @@ -6366,13 +6379,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, init_eq_config(hr_dev, eq); /* if not multi-hop, eqe buffer only use one trunk */ - count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT, - &bt_ba); - if (count < 1) { - dev_err(hr_dev->dev, "failed to find EQE mtr\n"); - return -ENOBUFS; + ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, + ARRAY_SIZE(eqe_ba)); + if (ret) { + dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret); + return ret; } + bt_ba = hns_roce_get_mtr_ba(&eq->mtr); + hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID); hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num); hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore); @@ -6719,6 +6734,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .query_qpc = hns_roce_v2_query_qpc, .query_mpt = hns_roce_v2_query_mpt, .query_srqc = hns_roce_v2_query_srqc, + .query_sccc = hns_roce_v2_query_sccc, .query_hw_counter = hns_roce_hw_v2_query_counter, .hns_roce_dev_ops = &hns_roce_v2_dev_ops, .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index cd97cbee68..dfed6b4ddb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -646,6 +646,12 @@ struct hns_roce_v2_qp_context { #define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23) #define QPCEX_STASH QPCEX_FIELD_LOC(82, 82) +#define SCC_CONTEXT_SIZE 16 + +struct hns_roce_v2_scc_context { + __le32 data[SCC_CONTEXT_SIZE]; +}; + #define V2_QP_RWE_S 1 /* rdma write enable */ #define V2_QP_RRE_S 2 /* rdma read enable */ #define V2_QP_ATE_S 3 /* rdma atomic enable */ @@ -1214,12 +1220,13 @@ struct hns_roce_query_pf_caps_d { #define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20) #define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22) #define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24) -#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26) +#define PF_CAPS_D_CONG_CAP PF_CAPS_D_FIELD_LOC(29, 26) #define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64) #define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86) #define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96) #define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118) #define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120) +#define PF_CAPS_D_DEFAULT_ALG PF_CAPS_D_FIELD_LOC(127, 122) #define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128) #define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148) #define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160) @@ -1327,7 +1334,7 @@ struct fmea_ram_ecc { /* only for RNR timeout issue of HIP08 */ #define HNS_ROCE_CLOCK_ADJUST 1000 -#define HNS_ROCE_MAX_CQ_PERIOD 65 +#define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65 #define HNS_ROCE_MAX_EQ_PERIOD 65 #define HNS_ROCE_RNR_TIMER_10NS 1 #define HNS_ROCE_1US_CFG 999 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index a33d3cedbc..d202258368 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -41,6 +41,7 @@ #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hem.h" +#include "hns_roce_hw_v2.h" static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, const u8 *addr) @@ -193,6 +194,12 @@ static int hns_roce_query_device(struct ib_device *ib_dev, IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = 1; props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; + 
props->max_ah = INT_MAX; + props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD; + props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT; + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { props->max_srq = hr_dev->caps.num_srqs; props->max_srq_wr = hr_dev->caps.max_srq_wrs; @@ -395,6 +402,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS; } + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) + resp.congest_type = hr_dev->caps.cong_cap; + ret = hns_roce_uar_alloc(hr_dev, &context->uar); if (ret) goto error_out; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 0d42fd197c..80c050d7d0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -32,6 +32,7 @@ */ #include <linux/vmalloc.h> +#include <linux/count_zeros.h> #include <rdma/ib_umem.h> #include <linux/math.h> #include "hns_roce_device.h" @@ -103,14 +104,21 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, buf_attr.user_access = mr->access; /* fast MR's buffer is alloced before mapping, not at creation */ buf_attr.mtt_only = is_fast; + buf_attr.iova = mr->iova; + /* pagesize and hopnum is fixed for fast MR */ + buf_attr.adaptive = !is_fast; + buf_attr.type = MTR_PBL; err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr, hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT, udata, start); - if (err) + if (err) { ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err); - else - mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count; + return err; + } + + mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count; + mr->pbl_hop_num = buf_attr.region[0].hopnum; return err; } @@ -694,7 +702,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, mtr->umem = NULL; mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size, buf_attr->page_shift, - mtr->hem_cfg.is_direct ? + !mtr_has_mtt(buf_attr) ? HNS_ROCE_BUF_DIRECT : 0); if (IS_ERR(mtr->kmem)) { ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n", @@ -706,14 +714,41 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, return 0; } -static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int page_count, unsigned int page_shift) +static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr) +{ + struct hns_roce_buf_region *region; + int page_cnt = 0; + int i; + + for (i = 0; i < mtr->hem_cfg.region_count; i++) { + region = &mtr->hem_cfg.region[i]; + page_cnt += region->count; + } + + return page_cnt; +} + +static bool need_split_huge_page(struct hns_roce_mtr *mtr) +{ + /* When HEM buffer uses 0-level addressing, the page size is + * equal to the whole buffer size. If the current MTR has multiple + * regions, we split the buffer into small pages(4k, required by hns + * ROCEE). These pages will be used in multiple regions. + */ + return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1; +} + +static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr) { struct ib_device *ibdev = &hr_dev->ib_dev; + int page_count = cal_mtr_pg_cnt(mtr); + unsigned int page_shift; dma_addr_t *pages; int npage; int ret; + page_shift = need_split_huge_page(mtr) ? 
HNS_HW_PAGE_SHIFT : + mtr->hem_cfg.buf_pg_shift; /* alloc a tmp array to store buffer's dma address */ pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL); if (!pages) @@ -733,7 +768,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, goto err_alloc_list; } - if (mtr->hem_cfg.is_direct && npage > 1) { + if (need_split_huge_page(mtr) && npage > 1) { ret = mtr_check_direct_pages(pages, npage, page_shift); if (ret) { ibdev_err(ibdev, "failed to check %s page: %d / %d.\n", @@ -808,47 +843,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, return ret; } -int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) +static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg, + u32 start_index, u64 *mtt_buf, + int mtt_cnt) { - struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; - int mtt_count, left; - u32 start_index; + int mtt_count; int total = 0; - __le64 *mtts; u32 npage; u64 addr; - if (!mtt_buf || mtt_max < 1) - goto done; - - /* no mtt memory in direct mode, so just return the buffer address */ - if (cfg->is_direct) { - start_index = offset >> HNS_HW_PAGE_SHIFT; - for (mtt_count = 0; mtt_count < cfg->region_count && - total < mtt_max; mtt_count++) { - npage = cfg->region[mtt_count].offset; - if (npage < start_index) - continue; + if (mtt_cnt > cfg->region_count) + return -EINVAL; - addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); - mtt_buf[total] = addr; + for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt; + mtt_count++) { + npage = cfg->region[mtt_count].offset; + if (npage < start_index) + continue; - total++; - } + addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); + mtt_buf[total] = addr; - goto done; + total++; } - start_index = offset >> cfg->buf_pg_shift; - left = mtt_max; + if (!total) + return -ENOENT; + + return 0; +} + +static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, u32 start_index, + u64 *mtt_buf, int mtt_cnt) +{ + int left = mtt_cnt; + int total = 0; + int mtt_count; + __le64 *mtts; + u32 npage; + while (left > 0) { mtt_count = 0; mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, start_index + total, &mtt_count); if (!mtts || !mtt_count) - goto done; + break; npage = min(mtt_count, left); left -= npage; @@ -856,69 +897,165 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]); } -done: - if (base_addr) - *base_addr = cfg->root_ba; + if (!total) + return -ENOENT; + + return 0; +} + +int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + u32 offset, u64 *mtt_buf, int mtt_max) +{ + struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; + u32 start_index; + int ret; + + if (!mtt_buf || mtt_max < 1) + return -EINVAL; + + /* no mtt memory in direct mode, so just return the buffer address */ + if (cfg->is_direct) { + start_index = offset >> HNS_HW_PAGE_SHIFT; + ret = hns_roce_get_direct_addr_mtt(cfg, start_index, + mtt_buf, mtt_max); + } else { + start_index = offset >> cfg->buf_pg_shift; + ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index, + mtt_buf, mtt_max); + } + return ret; +} + +static int get_best_page_shift(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr) +{ + unsigned int page_sz; + + if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem) + return 0; + + page_sz = ib_umem_find_best_pgsz(mtr->umem, + 
hr_dev->caps.page_size_cap, + buf_attr->iova); + if (!page_sz) + return -EINVAL; + + buf_attr->page_shift = order_base_2(page_sz); + return 0; +} + +static int get_best_hop_num(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *buf_attr, + unsigned int ba_pg_shift) +{ +#define INVALID_HOPNUM -1 +#define MIN_BA_CNT 1 + size_t buf_pg_sz = 1 << buf_attr->page_shift; + struct ib_device *ibdev = &hr_dev->ib_dev; + size_t ba_pg_sz = 1 << ba_pg_shift; + int hop_num = INVALID_HOPNUM; + size_t unit = MIN_BA_CNT; + size_t ba_cnt; + int j; + + if (!buf_attr->adaptive || buf_attr->type != MTR_PBL) + return 0; + + /* Caculating the number of buf pages, each buf page need a BA */ + if (mtr->umem) + ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz); + else + ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz); + + for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) { + if (ba_cnt <= unit) { + hop_num = j; + break; + } + /* Number of BAs can be represented at per hop */ + unit *= ba_pg_sz / BA_BYTE_LEN; + } + + if (hop_num < 0) { + ibdev_err(ibdev, + "failed to calculate a valid hopnum.\n"); + return -EINVAL; + } - return total; + buf_attr->region[0].hopnum = hop_num; + + return 0; +} + +static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev, + struct hns_roce_buf_attr *attr) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + + if (attr->region_count > ARRAY_SIZE(attr->region) || + attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) { + ibdev_err(ibdev, + "invalid buf attr, region count %d, page shift %u.\n", + attr->region_count, attr->page_shift); + return false; + } + + return true; } static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, - struct hns_roce_buf_attr *attr, - struct hns_roce_hem_cfg *cfg, - unsigned int *buf_page_shift, u64 unalinged_size) + struct hns_roce_mtr *mtr, + struct hns_roce_buf_attr *attr) { + struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; struct hns_roce_buf_region *r; - u64 first_region_padding; - int page_cnt, region_cnt; - unsigned int page_shift; + size_t buf_pg_sz; size_t buf_size; + int page_cnt, i; + u64 pgoff = 0; + + if (!is_buf_attr_valid(hr_dev, attr)) + return -EINVAL; /* If mtt is disabled, all pages must be within a continuous range */ cfg->is_direct = !mtr_has_mtt(attr); + cfg->region_count = attr->region_count; buf_size = mtr_bufs_size(attr); - if (cfg->is_direct) { - /* When HEM buffer uses 0-level addressing, the page size is - * equal to the whole buffer size, and we split the buffer into - * small pages which is used to check whether the adjacent - * units are in the continuous space and its size is fixed to - * 4K based on hns ROCEE's requirement. - */ - page_shift = HNS_HW_PAGE_SHIFT; - - /* The ROCEE requires the page size to be 4K * 2 ^ N. */ + if (need_split_huge_page(mtr)) { + buf_pg_sz = HNS_HW_PAGE_SIZE; cfg->buf_pg_count = 1; + /* The ROCEE requires the page size to be 4K * 2 ^ N. */ cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE)); - first_region_padding = 0; } else { - page_shift = attr->page_shift; - cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size, - 1 << page_shift); - cfg->buf_pg_shift = page_shift; - first_region_padding = unalinged_size; + buf_pg_sz = 1 << attr->page_shift; + cfg->buf_pg_count = mtr->umem ? + ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) : + DIV_ROUND_UP(buf_size, buf_pg_sz); + cfg->buf_pg_shift = attr->page_shift; + pgoff = mtr->umem ? 
mtr->umem->address & ~PAGE_MASK : 0; } /* Convert buffer size to page index and page count for each region and * the buffer's offset needs to be appended to the first region. */ - for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count && - region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) { - r = &cfg->region[region_cnt]; + for (page_cnt = 0, i = 0; i < attr->region_count; i++) { + r = &cfg->region[i]; r->offset = page_cnt; - buf_size = hr_hw_page_align(attr->region[region_cnt].size + - first_region_padding); - r->count = DIV_ROUND_UP(buf_size, 1 << page_shift); - first_region_padding = 0; + buf_size = hr_hw_page_align(attr->region[i].size + pgoff); + if (attr->type == MTR_PBL && mtr->umem) + r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz); + else + r->count = DIV_ROUND_UP(buf_size, buf_pg_sz); + + pgoff = 0; page_cnt += r->count; - r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum, - r->count); + r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count); } - cfg->region_count = region_cnt; - *buf_page_shift = page_shift; - - return page_cnt; + return 0; } static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum) @@ -1006,50 +1143,58 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, unsigned long user_addr) { struct ib_device *ibdev = &hr_dev->ib_dev; - unsigned int buf_page_shift = 0; - int buf_page_cnt; int ret; - buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg, - &buf_page_shift, - udata ? user_addr & ~PAGE_MASK : 0); - if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) { - ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n", - buf_page_cnt, buf_page_shift); - return -EINVAL; - } - - ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift); - if (ret) { - ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret); - return ret; - } - /* The caller has its own buffer list and invokes the hns_roce_mtr_map() * to finish the MTT configuration. 
*/ if (buf_attr->mtt_only) { mtr->umem = NULL; mtr->kmem = NULL; - return 0; + } else { + ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr); + if (ret) { + ibdev_err(ibdev, + "failed to alloc mtr bufs, ret = %d.\n", ret); + return ret; + } + + ret = get_best_page_shift(hr_dev, mtr, buf_attr); + if (ret) + goto err_init_buf; + + ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift); + if (ret) + goto err_init_buf; } - ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr); + ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr); + if (ret) + goto err_init_buf; + + ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift); if (ret) { - ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret); - goto err_alloc_mtt; + ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret); + goto err_init_buf; } + if (buf_attr->mtt_only) + return 0; + /* Write buffer's dma address to MTT */ - ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift); - if (ret) + ret = mtr_map_bufs(hr_dev, mtr); + if (ret) { ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret); - else - return 0; + goto err_alloc_mtt; + } + + return 0; - mtr_free_bufs(hr_dev, mtr); err_alloc_mtt: mtr_free_mtt(hr_dev, mtr); +err_init_buf: + mtr_free_bufs(hr_dev, mtr); + return ret; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 31b1472106..f35a66325d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1004,6 +1004,60 @@ static void free_kernel_wrid(struct hns_roce_qp *hr_qp) kfree(hr_qp->sq.wrid); } +static void default_congest_type(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + if (hr_qp->ibqp.qp_type == IB_QPT_UD || + hr_qp->ibqp.qp_type == IB_QPT_GSI) + hr_qp->cong_type = CONG_TYPE_DCQCN; + else + hr_qp->cong_type = hr_dev->caps.default_cong_type; +} + +static int set_congest_type(struct hns_roce_qp *hr_qp, + struct hns_roce_ib_create_qp *ucmd) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); + + switch (ucmd->cong_type_flags) { + case HNS_ROCE_CREATE_QP_FLAGS_DCQCN: + hr_qp->cong_type = CONG_TYPE_DCQCN; + break; + case HNS_ROCE_CREATE_QP_FLAGS_LDCP: + hr_qp->cong_type = CONG_TYPE_LDCP; + break; + case HNS_ROCE_CREATE_QP_FLAGS_HC3: + hr_qp->cong_type = CONG_TYPE_HC3; + break; + case HNS_ROCE_CREATE_QP_FLAGS_DIP: + hr_qp->cong_type = CONG_TYPE_DIP; + break; + default: + return -EINVAL; + } + + if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap)) + return -EOPNOTSUPP; + + if (hr_qp->ibqp.qp_type == IB_QPT_UD && + hr_qp->cong_type != CONG_TYPE_DCQCN) + return -EOPNOTSUPP; + + return 0; +} + +static int set_congest_param(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct hns_roce_ib_create_qp *ucmd) +{ + if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE) + return set_congest_type(hr_qp, ucmd); + + default_congest_type(hr_dev, hr_qp); + + return 0; +} + static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, @@ -1043,6 +1097,10 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ibdev_err(ibdev, "failed to set user SQ size, ret = %d.\n", ret); + + ret = set_congest_param(hr_dev, hr_qp, ucmd); + if (ret) + return ret; } else { if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) hr_qp->config = HNS_ROCE_EXSGE_FLAGS; @@ -1051,6 +1109,8 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 
ibdev_err(ibdev, "failed to set kernel SQ size, ret = %d.\n", ret); + + default_congest_type(hr_dev, hr_qp); } return ret; diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index f7f3c4cc74..356d988169 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -97,16 +97,33 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp); - struct hns_roce_v2_qp_context context; + struct hns_roce_full_qp_ctx { + struct hns_roce_v2_qp_context qpc; + struct hns_roce_v2_scc_context sccc; + } context = {}; int ret; if (!hr_dev->hw->query_qpc) return -EINVAL; - ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context); + ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc); if (ret) - return -EINVAL; + return ret; + + /* If SCC is disabled or the query fails, the queried SCCC will + * be all 0. + */ + if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) || + !hr_dev->hw->query_sccc) + goto out; + + ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc); + if (ret) + ibdev_warn_ratelimited(&hr_dev->ib_dev, + "failed to query SCCC, ret = %d.\n", + ret); +out: ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context); return ret; diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c index b70b13484f..13a49d8fd4 100644 --- a/drivers/infiniband/hw/mana/mr.c +++ b/drivers/infiniband/hw/mana/mr.c @@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", start, iova, length, access_flags); + access_flags &= ~IB_ACCESS_OPTIONAL; if (access_flags & ~VALID_MR_FLAGS) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c2b557e642..9fb8a54423 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3760,10 +3760,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; return 0; -err: - mlx5r_macsec_dealloc_gids(dev); err_mp: mlx5_ib_cleanup_multiport_master(dev); +err: + mlx5r_macsec_dealloc_gids(dev); return err; } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 79ebafecca..f255a12e26 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1378,7 +1378,6 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props); void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, u64 access_flags); -void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num); int mlx5_ib_get_cqe_size(struct ib_cq *ibcq); int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev); void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index ecc111ed5d..d3c1f63791 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); MLX5_SET(mkc, mkc, access_mode_4_2, (ent->rb_key.access_mode >> 2) & 0x7); + MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); MLX5_SET(mkc, mkc, translations_octword_size, 
get_mkc_octo_size(ent->rb_key.access_mode, @@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache, new = &((*new)->rb_left); if (cmp < 0) new = &((*new)->rb_right); - if (cmp == 0) { - mutex_unlock(&cache->rb_lock); + if (cmp == 0) return -EEXIST; - } } /* Add new node and rebalance tree. */ @@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, } mr->mmkey.cache_ent = ent; mr->mmkey.type = MLX5_MKEY_MR; + mr->mmkey.rb_key = ent->rb_key; + mr->mmkey.cacheable = true; init_waitqueue_head(&mr->mmkey.wait); return mr; } @@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, mr->ibmr.pd = pd; mr->umem = umem; mr->page_shift = order_base_2(page_size); - mr->mmkey.cacheable = true; set_mr_fields(dev, mr, umem->length, access_flags, iova); return mr; diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index a056ea835d..84be0c3d56 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq, int err; struct mlx5_srq_attr in = {}; __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); + __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) / + sizeof(struct mlx5_wqe_data_seg); if (init_attr->srq_type != IB_SRQT_BASIC && init_attr->srq_type != IB_SRQT_XRC && init_attr->srq_type != IB_SRQT_TM) return -EOPNOTSUPP; - /* Sanity check SRQ size before proceeding */ - if (init_attr->attr.max_wr >= max_srq_wqes) { - mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", - init_attr->attr.max_wr, - max_srq_wqes); + /* Sanity check SRQ and sge size before proceeding */ + if (init_attr->attr.max_wr >= max_srq_wqes || + init_attr->attr.max_sge > max_sge_sz) { + mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n", + init_attr->attr.max_wr, max_srq_wqes, + init_attr->attr.max_sge, max_sge_sz); return -EINVAL; } diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 6f9ec8db01..255677bc12 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -162,8 +162,6 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) port->attr.active_mtu = mtu; port->mtu_cap = ib_mtu_enum_to_int(mtu); - - rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap); } /* called by ifc layer to create new rxe device. @@ -183,7 +181,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev) int err = 0; if (is_vlan_dev(ndev)) { - rxe_err("rxe creation allowed on top of a real device only"); + rxe_err("rxe creation allowed on top of a real device only\n"); err = -EPERM; goto err; } @@ -191,7 +189,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev) rxe = rxe_get_dev_from_net(ndev); if (rxe) { ib_device_put(&rxe->ib_dev); - rxe_err_dev(rxe, "already configured on %s", ndev->name); + rxe_err_dev(rxe, "already configured on %s\n", ndev->name); err = -EEXIST; goto err; } diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index d33dd6cf83..d8fb2c7af3 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -38,7 +38,7 @@ #define RXE_ROCE_V2_SPORT (0xc000) -#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__) +#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__) #define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \ "%s: " fmt, __func__, ##__VA_ARGS__) #define rxe_dbg_uc(uc, fmt, ...) 
ibdev_dbg((uc)->ibuc.device, \ @@ -58,7 +58,7 @@ #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__) -#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \ +#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__, \ ##__VA_ARGS__) #define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \ "%s: " fmt, __func__, ##__VA_ARGS__) @@ -79,7 +79,7 @@ #define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__) -#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \ +#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__, \ ##__VA_ARGS__) #define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \ "%s: " fmt, __func__, ##__VA_ARGS__) diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index acd2172bf0..c997b7cbf2 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -433,7 +433,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, } } else { if (wqe->status != IB_WC_WR_FLUSH_ERR) - rxe_err_qp(qp, "non-flush error status = %d", + rxe_err_qp(qp, "non-flush error status = %d\n", wqe->status); } } @@ -582,7 +582,7 @@ static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe) err = rxe_cq_post(qp->scq, &cqe, 0); if (err) - rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err); + rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err); return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index d5486cbb3f..fec87c9030 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, if (cq) { count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT); if (cqe < count) { - rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)", + rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n", cqe, count); goto err1; } @@ -96,7 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT); if (unlikely(full)) { - rxe_err_cq(cq, "queue full"); + rxe_err_cq(cq, "queue full\n"); spin_unlock_irqrestore(&cq->cq_lock, flags); if (cq->ibcq.event_handler) { ev.device = cq->ibcq.device; diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 4d2a8ef52c..746110898a 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -59,7 +59,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); /* rxe_mr.c */ u8 rxe_get_next_key(u32 last_key); void rxe_mr_init_dma(int access, struct rxe_mr *mr); -int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, int access, struct rxe_mr *mr); int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr); int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index f54042e9ae..da3dee5208 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -34,7 +34,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) case IB_MR_TYPE_MEM_REG: if (iova < mr->ibmr.iova || iova + length > mr->ibmr.iova + mr->ibmr.length) { 
- rxe_dbg_mr(mr, "iova/length out of range"); + rxe_dbg_mr(mr, "iova/length out of range\n"); return -EINVAL; } return 0; @@ -126,7 +126,7 @@ static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt) return xas_error(&xas); } -int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, int access, struct rxe_mr *mr) { struct ib_umem *umem; @@ -319,7 +319,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, err = mr_check_range(mr, iova, length); if (unlikely(err)) { - rxe_dbg_mr(mr, "iova out of range"); + rxe_dbg_mr(mr, "iova out of range\n"); return err; } @@ -477,7 +477,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 *va; if (unlikely(mr->state != RXE_MR_STATE_VALID)) { - rxe_dbg_mr(mr, "mr not in valid state"); + rxe_dbg_mr(mr, "mr not in valid state\n"); return RESPST_ERR_RKEY_VIOLATION; } @@ -490,7 +490,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, err = mr_check_range(mr, iova, sizeof(value)); if (err) { - rxe_dbg_mr(mr, "iova out of range"); + rxe_dbg_mr(mr, "iova out of range\n"); return RESPST_ERR_RKEY_VIOLATION; } page_offset = rxe_mr_iova_to_page_offset(mr, iova); @@ -501,7 +501,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, } if (unlikely(page_offset & 0x7)) { - rxe_dbg_mr(mr, "iova not aligned"); + rxe_dbg_mr(mr, "iova not aligned\n"); return RESPST_ERR_MISALIGNED_ATOMIC; } @@ -534,7 +534,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) /* See IBA oA19-28 */ if (unlikely(mr->state != RXE_MR_STATE_VALID)) { - rxe_dbg_mr(mr, "mr not in valid state"); + rxe_dbg_mr(mr, "mr not in valid state\n"); return RESPST_ERR_RKEY_VIOLATION; } @@ -548,7 +548,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) /* See IBA oA19-28 */ err = mr_check_range(mr, iova, sizeof(value)); if (unlikely(err)) { - rxe_dbg_mr(mr, "iova out of range"); + rxe_dbg_mr(mr, "iova out of range\n"); return RESPST_ERR_RKEY_VIOLATION; } page_offset = rxe_mr_iova_to_page_offset(mr, iova); @@ -560,7 +560,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) /* See IBA A19.4.2 */ if (unlikely(page_offset & 0x7)) { - rxe_dbg_mr(mr, "misaligned address"); + rxe_dbg_mr(mr, "misaligned address\n"); return RESPST_ERR_MISALIGNED_ATOMIC; } diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c index d9312b5c9d..379e65bfcd 100644 --- a/drivers/infiniband/sw/rxe/rxe_mw.c +++ b/drivers/infiniband/sw/rxe/rxe_mw.c @@ -198,7 +198,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) } if (access & ~RXE_ACCESS_SUPPORTED_MW) { - rxe_err_mw(mw, "access %#x not supported", access); + rxe_err_mw(mw, "access %#x not supported\n", access); ret = -EOPNOTSUPP; goto err_drop_mr; } diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 28e379c108..e3589c0201 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -201,7 +201,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init, qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size, QUEUE_TYPE_FROM_CLIENT); if (!qp->sq.queue) { - rxe_err_qp(qp, "Unable to allocate send queue"); + rxe_err_qp(qp, "Unable to allocate send queue\n"); err = -ENOMEM; goto err_out; } @@ -211,7 +211,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init, qp->sq.queue->buf, qp->sq.queue->buf_size, &qp->sq.queue->ip); 
if (err) { - rxe_err_qp(qp, "do_mmap_info failed, err = %d", err); + rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err); goto err_free; } @@ -292,7 +292,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init, qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size, QUEUE_TYPE_FROM_CLIENT); if (!qp->rq.queue) { - rxe_err_qp(qp, "Unable to allocate recv queue"); + rxe_err_qp(qp, "Unable to allocate recv queue\n"); err = -ENOMEM; goto err_out; } @@ -302,7 +302,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init, qp->rq.queue->buf, qp->rq.queue->buf_size, &qp->rq.queue->ip); if (err) { - rxe_err_qp(qp, "do_mmap_info failed, err = %d", err); + rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err); goto err_free; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index da470a925e..fa2b87c749 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -354,6 +354,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, * receive buffer later. For rmda operations additional * length checks are performed in check_rkey. */ + if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { + unsigned int payload = payload_size(pkt); + unsigned int recv_buffer_len = 0; + int i; + + for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) + recv_buffer_len += qp->resp.wqe->dma.sge[i].length; + if (payload + 40 > recv_buffer_len) { + rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); + return RESPST_ERR_LENGTH; + } + } + if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))) { unsigned int mtu = qp->mtu; @@ -362,18 +375,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, if ((pkt->mask & RXE_START_MASK) && (pkt->mask & RXE_END_MASK)) { if (unlikely(payload > mtu)) { - rxe_dbg_qp(qp, "only packet too long"); + rxe_dbg_qp(qp, "only packet too long\n"); return RESPST_ERR_LENGTH; } } else if ((pkt->mask & RXE_START_MASK) || (pkt->mask & RXE_MIDDLE_MASK)) { if (unlikely(payload != mtu)) { - rxe_dbg_qp(qp, "first or middle packet not mtu"); + rxe_dbg_qp(qp, "first or middle packet not mtu\n"); return RESPST_ERR_LENGTH; } } else if (pkt->mask & RXE_END_MASK) { if (unlikely((payload == 0) || (payload > mtu))) { - rxe_dbg_qp(qp, "last packet zero or too long"); + rxe_dbg_qp(qp, "last packet zero or too long\n"); return RESPST_ERR_LENGTH; } } @@ -382,7 +395,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, /* See IBA C9-94 */ if (pkt->mask & RXE_RETH_MASK) { if (reth_len(pkt) > (1U << 31)) { - rxe_dbg_qp(qp, "dma length too long"); + rxe_dbg_qp(qp, "dma length too long\n"); return RESPST_ERR_LENGTH; } } @@ -1133,7 +1146,7 @@ static enum resp_states do_complete(struct rxe_qp *qp, } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - rxe_err_qp(qp, "non-flush error status = %d", + rxe_err_qp(qp, "non-flush error status = %d\n", wc->status); } @@ -1442,7 +1455,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) err = rxe_cq_post(qp->rcq, &cqe, 0); if (err) - rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err); + rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err); return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 1501120d4f..80332638d9 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -156,7 +156,7 @@ static void do_task(struct rxe_task *task) default: 
WARN_ON(1); - rxe_dbg_qp(task->qp, "unexpected task state = %d", + rxe_dbg_qp(task->qp, "unexpected task state = %d\n", task->state); task->state = TASK_STATE_IDLE; } @@ -167,7 +167,7 @@ exit: if (WARN_ON(task->num_done != task->num_sched)) rxe_dbg_qp( task->qp, - "%ld tasks scheduled, %ld tasks done", + "%ld tasks scheduled, %ld tasks done\n", task->num_sched, task->num_done); } spin_unlock_irqrestore(&task->lock, flags); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0930350522..a7e9510666 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -23,7 +23,7 @@ static int rxe_query_device(struct ib_device *ibdev, int err; if (udata->inlen || udata->outlen) { - rxe_dbg_dev(rxe, "malformed udata"); + rxe_dbg_dev(rxe, "malformed udata\n"); err = -EINVAL; goto err_out; } @@ -33,7 +33,7 @@ static int rxe_query_device(struct ib_device *ibdev, return 0; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -45,7 +45,7 @@ static int rxe_query_port(struct ib_device *ibdev, if (port_num != 1) { err = -EINVAL; - rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); goto err_out; } @@ -67,7 +67,7 @@ static int rxe_query_port(struct ib_device *ibdev, return ret; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -79,7 +79,7 @@ static int rxe_query_pkey(struct ib_device *ibdev, if (index != 0) { err = -EINVAL; - rxe_dbg_dev(rxe, "bad pkey index = %d", index); + rxe_dbg_dev(rxe, "bad pkey index = %d\n", index); goto err_out; } @@ -87,7 +87,7 @@ static int rxe_query_pkey(struct ib_device *ibdev, return 0; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -100,7 +100,7 @@ static int rxe_modify_device(struct ib_device *ibdev, if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | IB_DEVICE_MODIFY_NODE_DESC)) { err = -EOPNOTSUPP; - rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask); + rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask); goto err_out; } @@ -115,7 +115,7 @@ static int rxe_modify_device(struct ib_device *ibdev, return 0; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -128,14 +128,14 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num, if (port_num != 1) { err = -EINVAL; - rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); goto err_out; } //TODO is shutdown useful if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) { err = -EOPNOTSUPP; - rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask); + rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask); goto err_out; } @@ -149,7 +149,7 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num, return 0; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -161,14 +161,14 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev, if (port_num != 1) { err = -EINVAL; - rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); goto err_out; } return IB_LINK_LAYER_ETHERNET; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -181,7 +181,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 
port_num, if (port_num != 1) { err = -EINVAL; - rxe_dbg_dev(rxe, "bad port_num = %d", port_num); + rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num); goto err_out; } @@ -197,7 +197,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num, return 0; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -210,7 +210,7 @@ static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) err = rxe_add_to_pool(&rxe->uc_pool, uc); if (err) - rxe_err_dev(rxe, "unable to create uc"); + rxe_err_dev(rxe, "unable to create uc\n"); return err; } @@ -222,7 +222,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) err = rxe_cleanup(uc); if (err) - rxe_err_uc(uc, "cleanup failed, err = %d", err); + rxe_err_uc(uc, "cleanup failed, err = %d\n", err); } /* pd */ @@ -234,14 +234,14 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) err = rxe_add_to_pool(&rxe->pd_pool, pd); if (err) { - rxe_dbg_dev(rxe, "unable to alloc pd"); + rxe_dbg_dev(rxe, "unable to alloc pd\n"); goto err_out; } return 0; err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -252,7 +252,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) err = rxe_cleanup(pd); if (err) - rxe_err_pd(pd, "cleanup failed, err = %d", err); + rxe_err_pd(pd, "cleanup failed, err = %d\n", err); return 0; } @@ -279,7 +279,7 @@ static int rxe_create_ah(struct ib_ah *ibah, err = rxe_add_to_pool_ah(&rxe->ah_pool, ah, init_attr->flags & RDMA_CREATE_AH_SLEEPABLE); if (err) { - rxe_dbg_dev(rxe, "unable to create ah"); + rxe_dbg_dev(rxe, "unable to create ah\n"); goto err_out; } @@ -288,7 +288,7 @@ static int rxe_create_ah(struct ib_ah *ibah, err = rxe_ah_chk_attr(ah, init_attr->ah_attr); if (err) { - rxe_dbg_ah(ah, "bad attr"); + rxe_dbg_ah(ah, "bad attr\n"); goto err_cleanup; } @@ -298,7 +298,7 @@ static int rxe_create_ah(struct ib_ah *ibah, sizeof(uresp->ah_num)); if (err) { err = -EFAULT; - rxe_dbg_ah(ah, "unable to copy to user"); + rxe_dbg_ah(ah, "unable to copy to user\n"); goto err_cleanup; } } else if (ah->is_user) { @@ -314,9 +314,9 @@ static int rxe_create_ah(struct ib_ah *ibah, err_cleanup: cleanup_err = rxe_cleanup(ah); if (cleanup_err) - rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err); + rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err); err_out: - rxe_err_ah(ah, "returned err = %d", err); + rxe_err_ah(ah, "returned err = %d\n", err); return err; } @@ -327,7 +327,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) err = rxe_ah_chk_attr(ah, attr); if (err) { - rxe_dbg_ah(ah, "bad attr"); + rxe_dbg_ah(ah, "bad attr\n"); goto err_out; } @@ -336,7 +336,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) return 0; err_out: - rxe_err_ah(ah, "returned err = %d", err); + rxe_err_ah(ah, "returned err = %d\n", err); return err; } @@ -358,7 +358,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE); if (err) - rxe_err_ah(ah, "cleanup failed, err = %d", err); + rxe_err_ah(ah, "cleanup failed, err = %d\n", err); return 0; } @@ -376,7 +376,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, if (udata) { if (udata->outlen < sizeof(*uresp)) { err = -EINVAL; - rxe_err_dev(rxe, "malformed udata"); + rxe_err_dev(rxe, "malformed udata\n"); goto err_out; } uresp = udata->outbuf; @@ -384,20 +384,20 @@ 
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, if (init->srq_type != IB_SRQT_BASIC) { err = -EOPNOTSUPP; - rxe_dbg_dev(rxe, "srq type = %d, not supported", + rxe_dbg_dev(rxe, "srq type = %d, not supported\n", init->srq_type); goto err_out; } err = rxe_srq_chk_init(rxe, init); if (err) { - rxe_dbg_dev(rxe, "invalid init attributes"); + rxe_dbg_dev(rxe, "invalid init attributes\n"); goto err_out; } err = rxe_add_to_pool(&rxe->srq_pool, srq); if (err) { - rxe_dbg_dev(rxe, "unable to create srq, err = %d", err); + rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err); goto err_out; } @@ -406,7 +406,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, err = rxe_srq_from_init(rxe, srq, init, udata, uresp); if (err) { - rxe_dbg_srq(srq, "create srq failed, err = %d", err); + rxe_dbg_srq(srq, "create srq failed, err = %d\n", err); goto err_cleanup; } @@ -415,9 +415,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, err_cleanup: cleanup_err = rxe_cleanup(srq); if (cleanup_err) - rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err); + rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err); err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -433,34 +433,34 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, if (udata) { if (udata->inlen < sizeof(cmd)) { err = -EINVAL; - rxe_dbg_srq(srq, "malformed udata"); + rxe_dbg_srq(srq, "malformed udata\n"); goto err_out; } err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); if (err) { err = -EFAULT; - rxe_dbg_srq(srq, "unable to read udata"); + rxe_dbg_srq(srq, "unable to read udata\n"); goto err_out; } } err = rxe_srq_chk_attr(rxe, srq, attr, mask); if (err) { - rxe_dbg_srq(srq, "bad init attributes"); + rxe_dbg_srq(srq, "bad init attributes\n"); goto err_out; } err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata); if (err) { - rxe_dbg_srq(srq, "bad attr"); + rxe_dbg_srq(srq, "bad attr\n"); goto err_out; } return 0; err_out: - rxe_err_srq(srq, "returned err = %d", err); + rxe_err_srq(srq, "returned err = %d\n", err); return err; } @@ -471,7 +471,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) if (srq->error) { err = -EINVAL; - rxe_dbg_srq(srq, "srq in error state"); + rxe_dbg_srq(srq, "srq in error state\n"); goto err_out; } @@ -481,7 +481,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) return 0; err_out: - rxe_err_srq(srq, "returned err = %d", err); + rxe_err_srq(srq, "returned err = %d\n", err); return err; } @@ -505,7 +505,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, if (err) { *bad_wr = wr; - rxe_err_srq(srq, "returned err = %d", err); + rxe_err_srq(srq, "returned err = %d\n", err); } return err; @@ -518,7 +518,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) err = rxe_cleanup(srq); if (err) - rxe_err_srq(srq, "cleanup failed, err = %d", err); + rxe_err_srq(srq, "cleanup failed, err = %d\n", err); return 0; } @@ -536,13 +536,13 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, if (udata) { if (udata->inlen) { err = -EINVAL; - rxe_dbg_dev(rxe, "malformed udata, err = %d", err); + rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err); goto err_out; } if (udata->outlen < sizeof(*uresp)) { err = -EINVAL; - rxe_dbg_dev(rxe, "malformed udata, err = %d", err); + rxe_dbg_dev(rxe, "malformed udata, err = 
%d\n", err); goto err_out; } @@ -554,25 +554,25 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, if (init->create_flags) { err = -EOPNOTSUPP; - rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err); + rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err); goto err_out; } err = rxe_qp_chk_init(rxe, init); if (err) { - rxe_dbg_dev(rxe, "bad init attr, err = %d", err); + rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err); goto err_out; } err = rxe_add_to_pool(&rxe->qp_pool, qp); if (err) { - rxe_dbg_dev(rxe, "unable to create qp, err = %d", err); + rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err); goto err_out; } err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata); if (err) { - rxe_dbg_qp(qp, "create qp failed, err = %d", err); + rxe_dbg_qp(qp, "create qp failed, err = %d\n", err); goto err_cleanup; } @@ -582,9 +582,9 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, err_cleanup: cleanup_err = rxe_cleanup(qp); if (cleanup_err) - rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err); + rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err); err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -597,20 +597,20 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (mask & ~IB_QP_ATTR_STANDARD_BITS) { err = -EOPNOTSUPP; - rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d", + rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n", mask, err); goto err_out; } err = rxe_qp_chk_attr(rxe, qp, attr, mask); if (err) { - rxe_dbg_qp(qp, "bad mask/attr, err = %d", err); + rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err); goto err_out; } err = rxe_qp_from_attr(qp, attr, mask, udata); if (err) { - rxe_dbg_qp(qp, "modify qp failed, err = %d", err); + rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err); goto err_out; } @@ -622,7 +622,7 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, return 0; err_out: - rxe_err_qp(qp, "returned err = %d", err); + rxe_err_qp(qp, "returned err = %d\n", err); return err; } @@ -644,18 +644,18 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) err = rxe_qp_chk_destroy(qp); if (err) { - rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err); + rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err); goto err_out; } err = rxe_cleanup(qp); if (err) - rxe_err_qp(qp, "cleanup failed, err = %d", err); + rxe_err_qp(qp, "cleanup failed, err = %d\n", err); return 0; err_out: - rxe_err_qp(qp, "returned err = %d", err); + rxe_err_qp(qp, "returned err = %d\n", err); return err; } @@ -675,12 +675,12 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr, do { mask = wr_opcode_mask(ibwr->opcode, qp); if (!mask) { - rxe_err_qp(qp, "bad wr opcode for qp type"); + rxe_err_qp(qp, "bad wr opcode for qp type\n"); break; } if (num_sge > sq->max_sge) { - rxe_err_qp(qp, "num_sge > max_sge"); + rxe_err_qp(qp, "num_sge > max_sge\n"); break; } @@ -689,27 +689,27 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr, length += ibwr->sg_list[i].length; if (length > (1UL << 31)) { - rxe_err_qp(qp, "message length too long"); + rxe_err_qp(qp, "message length too long\n"); break; } if (mask & WR_ATOMIC_MASK) { if (length != 8) { - rxe_err_qp(qp, "atomic length != 8"); + rxe_err_qp(qp, "atomic length != 8\n"); break; } if (atomic_wr(ibwr)->remote_addr & 0x7) { - rxe_err_qp(qp, "misaligned atomic address"); + rxe_err_qp(qp, "misaligned 
atomic address\n"); break; } } if (ibwr->send_flags & IB_SEND_INLINE) { if (!(mask & WR_INLINE_MASK)) { - rxe_err_qp(qp, "opcode doesn't support inline data"); + rxe_err_qp(qp, "opcode doesn't support inline data\n"); break; } if (length > sq->max_inline) { - rxe_err_qp(qp, "inline length too big"); + rxe_err_qp(qp, "inline length too big\n"); break; } } @@ -747,7 +747,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, case IB_WR_SEND: break; default: - rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP", + rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n", wr->opcode); return -EINVAL; } @@ -795,7 +795,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, case IB_WR_ATOMIC_WRITE: break; default: - rxe_err_qp(qp, "unsupported wr opcode %d", + rxe_err_qp(qp, "unsupported wr opcode %d\n", wr->opcode); return -EINVAL; } @@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe, int i; for (i = 0; i < ibwr->num_sge; i++, sge++) { - memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length); + memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length); p += sge->length; } } @@ -870,7 +870,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr) full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP); if (unlikely(full)) { - rxe_err_qp(qp, "send queue full"); + rxe_err_qp(qp, "send queue full\n"); return -ENOMEM; } @@ -926,14 +926,14 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, /* caller has already called destroy_qp */ if (WARN_ON_ONCE(!qp->valid)) { spin_unlock_irqrestore(&qp->state_lock, flags); - rxe_err_qp(qp, "qp has been destroyed"); + rxe_err_qp(qp, "qp has been destroyed\n"); return -EINVAL; } if (unlikely(qp_state(qp) < IB_QPS_RTS)) { spin_unlock_irqrestore(&qp->state_lock, flags); *bad_wr = wr; - rxe_err_qp(qp, "qp not ready to send"); + rxe_err_qp(qp, "qp not ready to send\n"); return -EINVAL; } spin_unlock_irqrestore(&qp->state_lock, flags); @@ -963,13 +963,13 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP); if (unlikely(full)) { err = -ENOMEM; - rxe_dbg("queue full"); + rxe_dbg("queue full\n"); goto err_out; } if (unlikely(num_sge > rq->max_sge)) { err = -EINVAL; - rxe_dbg("bad num_sge > max_sge"); + rxe_dbg("bad num_sge > max_sge\n"); goto err_out; } @@ -980,7 +980,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) /* IBA max message size is 2^31 */ if (length >= (1UL<<31)) { err = -EINVAL; - rxe_dbg("message length too long"); + rxe_dbg("message length too long\n"); goto err_out; } @@ -1000,7 +1000,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) return 0; err_out: - rxe_dbg("returned err = %d", err); + rxe_dbg("returned err = %d\n", err); return err; } @@ -1016,7 +1016,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, /* caller has already called destroy_qp */ if (WARN_ON_ONCE(!qp->valid)) { spin_unlock_irqrestore(&qp->state_lock, flags); - rxe_err_qp(qp, "qp has been destroyed"); + rxe_err_qp(qp, "qp has been destroyed\n"); return -EINVAL; } @@ -1024,14 +1024,14 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, if (unlikely((qp_state(qp) < IB_QPS_INIT))) { spin_unlock_irqrestore(&qp->state_lock, flags); *bad_wr = wr; - rxe_dbg_qp(qp, "qp not ready to post recv"); + rxe_dbg_qp(qp, "qp not ready to post recv\n"); return -EINVAL; } spin_unlock_irqrestore(&qp->state_lock, flags); if 
(unlikely(qp->srq)) { *bad_wr = wr; - rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead"); + rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n"); return -EINVAL; } @@ -1069,7 +1069,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (udata) { if (udata->outlen < sizeof(*uresp)) { err = -EINVAL; - rxe_dbg_dev(rxe, "malformed udata, err = %d", err); + rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err); goto err_out; } uresp = udata->outbuf; @@ -1077,26 +1077,26 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (attr->flags) { err = -EOPNOTSUPP; - rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err); + rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err); goto err_out; } err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector); if (err) { - rxe_dbg_dev(rxe, "bad init attributes, err = %d", err); + rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err); goto err_out; } err = rxe_add_to_pool(&rxe->cq_pool, cq); if (err) { - rxe_dbg_dev(rxe, "unable to create cq, err = %d", err); + rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err); goto err_out; } err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata, uresp); if (err) { - rxe_dbg_cq(cq, "create cq failed, err = %d", err); + rxe_dbg_cq(cq, "create cq failed, err = %d\n", err); goto err_cleanup; } @@ -1105,9 +1105,9 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, err_cleanup: cleanup_err = rxe_cleanup(cq); if (cleanup_err) - rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err); + rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err); err_out: - rxe_err_dev(rxe, "returned err = %d", err); + rxe_err_dev(rxe, "returned err = %d\n", err); return err; } @@ -1121,7 +1121,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) if (udata) { if (udata->outlen < sizeof(*uresp)) { err = -EINVAL; - rxe_dbg_cq(cq, "malformed udata"); + rxe_dbg_cq(cq, "malformed udata\n"); goto err_out; } uresp = udata->outbuf; @@ -1129,20 +1129,20 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) err = rxe_cq_chk_attr(rxe, cq, cqe, 0); if (err) { - rxe_dbg_cq(cq, "bad attr, err = %d", err); + rxe_dbg_cq(cq, "bad attr, err = %d\n", err); goto err_out; } err = rxe_cq_resize_queue(cq, cqe, uresp, udata); if (err) { - rxe_dbg_cq(cq, "resize cq failed, err = %d", err); + rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err); goto err_out; } return 0; err_out: - rxe_err_cq(cq, "returned err = %d", err); + rxe_err_cq(cq, "returned err = %d\n", err); return err; } @@ -1206,18 +1206,18 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) */ if (atomic_read(&cq->num_wq)) { err = -EINVAL; - rxe_dbg_cq(cq, "still in use"); + rxe_dbg_cq(cq, "still in use\n"); goto err_out; } err = rxe_cleanup(cq); if (err) - rxe_err_cq(cq, "cleanup failed, err = %d", err); + rxe_err_cq(cq, "cleanup failed, err = %d\n", err); return 0; err_out: - rxe_err_cq(cq, "returned err = %d", err); + rxe_err_cq(cq, "returned err = %d\n", err); return err; } @@ -1235,7 +1235,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) err = rxe_add_to_pool(&rxe->mr_pool, mr); if (err) { - rxe_dbg_dev(rxe, "unable to create mr"); + rxe_dbg_dev(rxe, "unable to create mr\n"); goto err_free; } @@ -1249,7 +1249,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) err_free: kfree(mr); - rxe_err_pd(pd, "returned err = %d", err); + rxe_err_pd(pd, "returned err = 
%d\n", err); return ERR_PTR(err); } @@ -1263,7 +1263,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, int err, cleanup_err; if (access & ~RXE_ACCESS_SUPPORTED_MR) { - rxe_err_pd(pd, "access = %#x not supported (%#x)", access, + rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access, RXE_ACCESS_SUPPORTED_MR); return ERR_PTR(-EOPNOTSUPP); } @@ -1274,7 +1274,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, err = rxe_add_to_pool(&rxe->mr_pool, mr); if (err) { - rxe_dbg_pd(pd, "unable to create mr"); + rxe_dbg_pd(pd, "unable to create mr\n"); goto err_free; } @@ -1282,9 +1282,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, mr->ibmr.pd = ibpd; mr->ibmr.device = ibpd->device; - err = rxe_mr_init_user(rxe, start, length, iova, access, mr); + err = rxe_mr_init_user(rxe, start, length, access, mr); if (err) { - rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err); + rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err); goto err_cleanup; } @@ -1294,10 +1294,10 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, err_cleanup: cleanup_err = rxe_cleanup(mr); if (cleanup_err) - rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); + rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err); err_free: kfree(mr); - rxe_err_pd(pd, "returned err = %d", err); + rxe_err_pd(pd, "returned err = %d\n", err); return ERR_PTR(err); } @@ -1314,7 +1314,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags, * rereg_pd and rereg_access */ if (flags & ~RXE_MR_REREG_SUPPORTED) { - rxe_err_mr(mr, "flags = %#x not supported", flags); + rxe_err_mr(mr, "flags = %#x not supported\n", flags); return ERR_PTR(-EOPNOTSUPP); } @@ -1326,7 +1326,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags, if (flags & IB_MR_REREG_ACCESS) { if (access & ~RXE_ACCESS_SUPPORTED_MR) { - rxe_err_mr(mr, "access = %#x not supported", access); + rxe_err_mr(mr, "access = %#x not supported\n", access); return ERR_PTR(-EOPNOTSUPP); } mr->access = access; @@ -1345,7 +1345,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, if (mr_type != IB_MR_TYPE_MEM_REG) { err = -EINVAL; - rxe_dbg_pd(pd, "mr type %d not supported, err = %d", + rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n", mr_type, err); goto err_out; } @@ -1364,7 +1364,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, err = rxe_mr_init_fast(max_num_sg, mr); if (err) { - rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err); + rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err); goto err_cleanup; } @@ -1374,11 +1374,11 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, err_cleanup: cleanup_err = rxe_cleanup(mr); if (cleanup_err) - rxe_err_mr(mr, "cleanup failed, err = %d", err); + rxe_err_mr(mr, "cleanup failed, err = %d\n", err); err_free: kfree(mr); err_out: - rxe_err_pd(pd, "returned err = %d", err); + rxe_err_pd(pd, "returned err = %d\n", err); return ERR_PTR(err); } @@ -1390,19 +1390,19 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) /* See IBA 10.6.7.2.6 */ if (atomic_read(&mr->num_mw) > 0) { err = -EINVAL; - rxe_dbg_mr(mr, "mr has mw's bound"); + rxe_dbg_mr(mr, "mr has mw's bound\n"); goto err_out; } cleanup_err = rxe_cleanup(mr); if (cleanup_err) - rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); + rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err); kfree_rcu_mightsleep(mr); return 0; err_out: - rxe_err_mr(mr, "returned 
err = %d", err); + rxe_err_mr(mr, "returned err = %d\n", err); return err; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 7a5be705d7..6f2a688fcc 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1272,10 +1272,10 @@ static int ipoib_get_iflink(const struct net_device *dev) /* parent interface */ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) - return dev->ifindex; + return READ_ONCE(dev->ifindex); /* child/vlan interface */ - return priv->parent->ifindex; + return READ_ONCE(priv->parent->ifindex); } static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 319d4288ed..8a4ab9ff0a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -287,8 +287,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, ah = ipoib_create_ah(dev, priv->pd, &av); if (IS_ERR(ah)) { - ipoib_warn(priv, "ib_address_create failed %ld\n", - -PTR_ERR(ah)); + ipoib_warn(priv, "ib_address_create failed %pe\n", ah); /* use original error */ return PTR_ERR(ah); } |