Diffstat (limited to 'drivers/infiniband/hw/mana')
-rw-r--r--  drivers/infiniband/hw/mana/cq.c        111
-rw-r--r--  drivers/infiniband/hw/mana/device.c     53
-rw-r--r--  drivers/infiniband/hw/mana/main.c      328
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h   147
-rw-r--r--  drivers/infiniband/hw/mana/mr.c          3
-rw-r--r--  drivers/infiniband/hw/mana/qp.c        126
-rw-r--r--  drivers/infiniband/hw/mana/wq.c         31
7 files changed, 618 insertions(+), 181 deletions(-)
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d0..c6a3fd57a1 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -9,23 +9,23 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ struct mana_ib_create_cq_resp resp = {};
+ struct mana_ib_ucontext *mana_ucontext;
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
+ bool is_rnic_cq;
+ u32 doorbell;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
- if (udata->inlen < sizeof(ucmd))
- return -EINVAL;
+ cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+ cq->cq_handle = INVALID_MANA_HANDLE;
- if (attr->comp_vector > gc->max_num_queues)
+ if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
return -EINVAL;
- cq->comp_vector = attr->comp_vector;
-
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
ibdev_dbg(ibdev,
@@ -33,42 +33,54 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
+ is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+
+ if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
return -EINVAL;
}
cq->cqe = attr->cqe;
- cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(cq->umem)) {
- err = PTR_ERR(cq->umem);
- ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
- err);
+ err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
return err;
}
- err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to create dma region for create cq, %d\n",
- err);
- goto err_release_umem;
+ mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+ ibucontext);
+ doorbell = mana_ucontext->doorbell;
+
+ if (is_rnic_cq) {
+ err = mana_ib_gd_create_cq(mdev, cq, doorbell);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+ goto err_destroy_queue;
+ }
+
+ err = mana_ib_install_cq_cb(mdev, cq);
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+ goto err_destroy_rnic_cq;
+ }
}
- ibdev_dbg(ibdev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, cq->gdma_region);
-
- /*
- * The CQ ID is not known at this time. The ID is generated at create_qp
- */
- cq->id = INVALID_QUEUE_ID;
+ resp.cqid = cq->queue.id;
+ err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+ goto err_remove_cq_cb;
+ }
return 0;
-err_release_umem:
- ib_umem_release(cq->umem);
+err_remove_cq_cb:
+ mana_ib_remove_cq_cb(mdev, cq);
+err_destroy_rnic_cq:
+ mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &cq->queue);
+
return err;
}
@@ -77,25 +89,17 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
- int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
- err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to destroy dma region, %d\n", err);
- return err;
- }
+ mana_ib_remove_cq_cb(mdev, cq);
- if (cq->id != INVALID_QUEUE_ID) {
- kfree(gc->cq_table[cq->id]);
- gc->cq_table[cq->id] = NULL;
- }
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_cq(mdev, cq);
- ib_umem_release(cq->umem);
+ mana_ib_destroy_queue(mdev, &cq->queue);
return 0;
}
@@ -113,8 +117,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue *gdma_cq;
+ if (cq->queue.id >= gc->max_num_cqs)
+ return -EINVAL;
/* Create CQ table entry */
- WARN_ON(gc->cq_table[cq->id]);
+ WARN_ON(gc->cq_table[cq->queue.id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq)
return -ENOMEM;
@@ -122,7 +128,18 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = cq->id;
- gc->cq_table[cq->id] = gdma_cq;
+ gdma_cq->id = cq->queue.id;
+ gc->cq_table[cq->queue.id] = gdma_cq;
return 0;
}
+
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+
+ if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
+ return;
+
+ kfree(gc->cq_table[cq->queue.id]);
+ gc->cq_table[cq->queue.id] = NULL;
+}
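The `udata->inlen < offsetof(struct mana_ib_create_cq, flags)` check above is an ABI-compatibility idiom: user space built before the flags field existed passes a shorter command, ib_copy_from_udata() copies only the bytes that were actually supplied, and because ucmd is zero-initialized the missing flags field simply reads as 0 (so the CQ is treated as a non-RNIC CQ). A minimal user-space sketch of the same idea; the struct layout and field names below are hypothetical, not the driver's real ABI:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical versioned command: 'flags' was appended in a later ABI rev. */
struct create_cq_cmd {
    uint64_t buf_addr;
    uint16_t flags;        /* new, optional trailing field */
    uint16_t reserved[3];
};

/* Copy at most 'inlen' bytes, mirroring what ib_copy_from_udata() with
 * min(sizeof(ucmd), udata->inlen) effectively does. */
static int parse_cmd(struct create_cq_cmd *cmd, const void *udata, size_t inlen)
{
    /* Reject callers too old to carry even the mandatory fields. */
    if (inlen < offsetof(struct create_cq_cmd, flags))
        return -1;

    memset(cmd, 0, sizeof(*cmd));        /* unsent fields default to 0 */
    memcpy(cmd, udata, inlen < sizeof(*cmd) ? inlen : sizeof(*cmd));
    return 0;
}

int main(void)
{
    struct create_cq_cmd cmd;
    uint64_t old_abi_cmd = 0x1000;        /* old caller: buf_addr only */

    if (parse_cmd(&cmd, &old_abi_cmd, sizeof(old_abi_cmd)) == 0)
        printf("buf_addr=0x%llx flags=0x%x\n",
               (unsigned long long)cmd.buf_addr, (unsigned)cmd.flags);
    return 0;
}

With only the old 8-byte payload, flags stays 0, which is the backward-compatible default.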
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 6fa902ee80..7bb7e06392 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -5,6 +5,7 @@
#include "mana_ib.h"
#include <net/mana/mana_auxiliary.h>
+#include <net/addrconf.h>
MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
MODULE_LICENSE("GPL");
@@ -15,6 +16,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.driver_id = RDMA_DRIVER_MANA,
.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
+ .add_gid = mana_ib_gd_add_gid,
.alloc_pd = mana_ib_alloc_pd,
.alloc_ucontext = mana_ib_alloc_ucontext,
.create_cq = mana_ib_create_cq,
@@ -23,18 +25,21 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.create_wq = mana_ib_create_wq,
.dealloc_pd = mana_ib_dealloc_pd,
.dealloc_ucontext = mana_ib_dealloc_ucontext,
+ .del_gid = mana_ib_gd_del_gid,
.dereg_mr = mana_ib_dereg_mr,
.destroy_cq = mana_ib_destroy_cq,
.destroy_qp = mana_ib_destroy_qp,
.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
.destroy_wq = mana_ib_destroy_wq,
.disassociate_ucontext = mana_ib_disassociate_ucontext,
+ .get_link_layer = mana_ib_get_link_layer,
.get_port_immutable = mana_ib_get_port_immutable,
.mmap = mana_ib_mmap,
.modify_qp = mana_ib_modify_qp,
.modify_wq = mana_ib_modify_wq,
.query_device = mana_ib_query_device,
.query_gid = mana_ib_query_gid,
+ .query_pkey = mana_ib_query_pkey,
.query_port = mana_ib_query_port,
.reg_user_mr = mana_ib_reg_user_mr,
@@ -51,8 +56,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
struct gdma_dev *mdev = madev->mdev;
+ struct net_device *ndev;
struct mana_context *mc;
struct mana_ib_dev *dev;
+ u8 mac_addr[ETH_ALEN];
int ret;
mc = mdev->driver_data;
@@ -74,9 +81,26 @@ static int mana_ib_probe(struct auxiliary_device *adev,
* num_comp_vectors needs to set to the max MSIX index
* when interrupts and event queues are implemented
*/
- dev->ib_dev.num_comp_vectors = 1;
+ dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;
+ rcu_read_lock(); /* required to get primary netdev */
+ ndev = mana_get_primary_netdev_rcu(mc, 0);
+ if (!ndev) {
+ rcu_read_unlock();
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ rcu_read_unlock();
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
@@ -92,15 +116,36 @@ static int mana_ib_probe(struct auxiliary_device *adev,
goto deregister_device;
}
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_device;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
+ ret);
+ goto destroy_rnic;
+ }
+
ret = ib_register_device(&dev->ib_dev, "mana_%d",
mdev->gdma_context->dev);
if (ret)
- goto deregister_device;
+ goto destroy_rnic;
dev_set_drvdata(&adev->dev, dev);
return 0;
+destroy_rnic:
+ mana_ib_gd_destroy_rnic_adapter(dev);
+destroy_eqs:
+ mana_ib_destroy_eqs(dev);
deregister_device:
mana_gd_deregister_device(dev->gdma_dev);
free_ib_device:
@@ -113,9 +158,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev);
-
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
mana_gd_deregister_device(dev->gdma_dev);
-
ib_dealloc_device(&dev->ib_dev);
}
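The probe path now derives the IB node GUID from the primary netdev's MAC address via addrconf_addr_eui48(). For illustration, a stand-alone sketch of the usual MAC-48 to EUI-64 expansion that this kind of helper performs (insert ff:fe in the middle and flip the universal/local bit); treat it as an approximation of the scheme, not the kernel function itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* MAC-48 -> EUI-64: insert FF:FE in the middle and flip the U/L bit. */
static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
    memcpy(eui, mac, 3);
    eui[3] = 0xFF;
    eui[4] = 0xFE;
    memcpy(eui + 5, mac + 3, 3);
    eui[0] ^= 0x02;        /* toggle universal/local bit */
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x15, 0x5d, 0x01, 0x02, 0x03 };
    uint8_t eui[8];
    int i;

    mac_to_eui64(eui, mac);
    for (i = 0; i < 8; i++)
        printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
    return 0;
}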
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee6..2a41135764 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,47 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue)
+{
+ struct ib_umem *umem;
+ int err;
+
+ queue->umem = NULL;
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+ umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+ return err;
+ }
+
+ err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+ goto free_umem;
+ }
+ queue->umem = umem;
+
+ ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);
+
+ return 0;
+free_umem:
+ ib_umem_release(umem);
+ return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+ ib_umem_release(queue->umem);
+}
+
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
struct gdma_context *gc,
@@ -484,11 +525,18 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
- /*
- * This version only support RAW_PACKET
- * other values need to be filled for other types
- */
+ struct ib_port_attr attr;
+ int err;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+ if (port_num == 1)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
return 0;
}
@@ -514,7 +562,42 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
- /* This version doesn't return port properties */
+ struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
+
+ if (!ndev)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+
+ if (netif_carrier_ok(ndev) && netif_running(ndev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = IB_SPEED_EDR;
+ props->pkey_tbl_len = 1;
+ if (port == 1)
+ props->gid_tbl_len = 16;
+
+ return 0;
+}
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index != 0)
+ return -EINVAL;
+ *pkey = IB_DEFAULT_PKEY_FULL;
return 0;
}
@@ -570,3 +653,238 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
return 0;
}
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue_spec spec = {};
+ int err, i;
+
+ spec.type = GDMA_EQ;
+ spec.monitor_avl_buf = false;
+ spec.queue_size = EQ_SIZE;
+ spec.eq.callback = NULL;
+ spec.eq.context = mdev;
+ spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ spec.eq.msix_index = 0;
+
+ err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ if (err)
+ return err;
+
+ mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
+ GFP_KERNEL);
+ if (!mdev->eqs) {
+ err = -ENOMEM;
+ goto destroy_fatal_eq;
+ }
+
+ for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
+ spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
+ if (err)
+ goto destroy_eqs;
+ }
+
+ return 0;
+
+destroy_eqs:
+ while (i-- > 0)
+ mana_gd_destroy_queue(gc, mdev->eqs[i]);
+ kfree(mdev->eqs);
+destroy_fatal_eq:
+ mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+ return err;
+}
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int i;
+
+ mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+
+ for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
+ mana_gd_destroy_queue(gc, mdev->eqs[i]);
+
+ kfree(mdev->eqs);
+}
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
+{
+ struct mana_rnic_create_adapter_resp resp = {};
+ struct mana_rnic_create_adapter_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
+ req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.notify_eq_id = mdev->fatal_err_eq->id;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
+ return err;
+ }
+ mdev->adapter_handle = resp.adapter;
+
+ return 0;
+}
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
+{
+ struct mana_rnic_destroy_adapter_resp resp = {};
+ struct mana_rnic_destroy_adapter_req req = {};
+ struct gdma_context *gc;
+ int err;
+
+ gc = mdev_to_gc(mdev);
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+ enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+ struct mana_rnic_config_addr_resp resp = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_config_addr_req req = {};
+ int err;
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+ ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+ return -EINVAL;
+ }
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = ADDR_OP_ADD;
+ req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+ copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+ struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+ enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+ struct mana_rnic_config_addr_resp resp = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_config_addr_req req = {};
+ int err;
+
+ if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+ ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+ return -EINVAL;
+ }
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = ADDR_OP_REMOVE;
+ req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+ copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
+{
+ struct mana_rnic_config_mac_addr_resp resp = {};
+ struct mana_rnic_config_mac_addr_req req = {};
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.op = op;
+ copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_create_cq_resp resp = {};
+ struct mana_rnic_create_cq_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.gdma_region = cq->queue.gdma_region;
+ req.eq_id = mdev->eqs[cq->comp_vector]->id;
+ req.doorbell_page = doorbell;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
+ return err;
+ }
+
+ cq->queue.id = resp.cq_id;
+ cq->cq_handle = resp.cq_handle;
+ /* The GDMA region is now owned by the CQ handle */
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+
+ return 0;
+}
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_rnic_destroy_cq_resp resp = {};
+ struct mana_rnic_destroy_cq_req req = {};
+ int err;
+
+ if (cq->cq_handle == INVALID_MANA_HANDLE)
+ return 0;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
+ req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.adapter = mdev->adapter_handle;
+ req.cq_handle = cq->cq_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
+ return err;
+ }
+
+ return 0;
+}
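Both GID requests and the MAC request above run the address through copy_in_reverse() (added to mana_ib.h later in this patch), so the buffer handed to the device carries the bytes in reverse order. A quick stand-alone check of what that helper does:

#include <stdint.h>
#include <stdio.h>

/* Same logic as the driver's copy_in_reverse() helper. */
static void copy_in_reverse(uint8_t *dst, const uint8_t *src, uint32_t size)
{
    uint32_t i;

    for (i = 0; i < size; i++)
        dst[size - 1 - i] = src[i];
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x15, 0x5d, 0xaa, 0xbb, 0xcc };
    uint8_t out[6];
    int i;

    copy_in_reverse(out, mac, sizeof(mac));
    for (i = 0; i < 6; i++)
        printf("%02x%s", out[i], i == 5 ? "\n" : ":");
    /* prints cc:bb:aa:5d:15:00 */
    return 0;
}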
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eebb..68c3b4f0fa 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,19 +45,26 @@ struct mana_ib_adapter_caps {
u32 max_inline_data_size;
};
+struct mana_ib_queue {
+ struct ib_umem *umem;
+ u64 gdma_region;
+ u64 id;
+};
+
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
+ mana_handle_t adapter_handle;
+ struct gdma_queue *fatal_err_eq;
+ struct gdma_queue **eqs;
struct mana_ib_adapter_caps adapter_caps;
};
struct mana_ib_wq {
struct ib_wq ibwq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int wqe;
u32 wq_buf_size;
- u64 gdma_region;
- u64 id;
mana_handle_t rx_object;
};
@@ -82,22 +89,17 @@ struct mana_ib_mr {
struct mana_ib_cq {
struct ib_cq ibcq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int cqe;
- u64 gdma_region;
- u64 id;
u32 comp_vector;
+ mana_handle_t cq_handle;
};
struct mana_ib_qp {
struct ib_qp ibqp;
- /* Work queue info */
- struct ib_umem *sq_umem;
- int sqe;
- u64 sq_gdma_region;
- u64 sq_id;
- mana_handle_t tx_object;
+ mana_handle_t qp_handle;
+ struct mana_ib_queue raw_sq;
/* The port on the IB device, starting with 1 */
u32 port;
@@ -114,6 +116,12 @@ struct mana_ib_rwq_ind_table {
enum mana_ib_command_code {
MANA_IB_GET_ADAPTER_CAP = 0x30001,
+ MANA_IB_CREATE_ADAPTER = 0x30002,
+ MANA_IB_DESTROY_ADAPTER = 0x30003,
+ MANA_IB_CONFIG_IP_ADDR = 0x30004,
+ MANA_IB_CONFIG_MAC_ADDR = 0x30005,
+ MANA_IB_CREATE_CQ = 0x30008,
+ MANA_IB_DESTROY_CQ = 0x30009,
};
struct mana_ib_query_adapter_caps_req {
@@ -142,6 +150,86 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_inline_data_size;
}; /* HW Data */
+struct mana_rnic_create_adapter_req {
+ struct gdma_req_hdr hdr;
+ u32 notify_eq_id;
+ u32 reserved;
+ u64 feature_flags;
+}; /* HW Data */
+
+struct mana_rnic_create_adapter_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_destroy_adapter_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+enum mana_ib_addr_op {
+ ADDR_OP_ADD = 1,
+ ADDR_OP_REMOVE = 2,
+};
+
+enum sgid_entry_type {
+ SGID_TYPE_IPV4 = 1,
+ SGID_TYPE_IPV6 = 2,
+};
+
+struct mana_rnic_config_addr_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ enum mana_ib_addr_op op;
+ enum sgid_entry_type sgid_type;
+ u8 ip_addr[16];
+}; /* HW Data */
+
+struct mana_rnic_config_addr_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_config_mac_addr_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ enum mana_ib_addr_op op;
+ u8 mac_addr[ETH_ALEN];
+ u8 reserved[6];
+}; /* HW Data */
+
+struct mana_rnic_config_mac_addr_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_rnic_create_cq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ u64 gdma_region;
+ u32 eq_id;
+ u32 doorbell_page;
+}; /* HW Data */
+
+struct mana_rnic_create_cq_resp {
+ struct gdma_resp_hdr hdr;
+ mana_handle_t cq_handle;
+ u32 cq_id;
+ u32 reserved;
+}; /* HW Data */
+
+struct mana_rnic_destroy_cq_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+ mana_handle_t cq_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_cq_resp {
+ struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{
return mdev->gdma_dev->gdma_context;
@@ -158,7 +246,16 @@ static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32
return mc->ports[port - 1];
}
+static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ dst[size - 1 - i] = src[i];
+}
+
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region);
@@ -169,6 +266,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
@@ -231,4 +332,26 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev);
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev);
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev);
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev);
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num);
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context);
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context);
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac);
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
#endif
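struct mana_ib_queue above groups the umem, DMA region and queue id that cq.c, qp.c and wq.c previously each carried separately. One detail worth noting: mana_ib_create_queue() writes sentinel values (INVALID_QUEUE_ID, GDMA_INVALID_DMA_REGION) before doing any work, and the teardown helpers check for those sentinels (as mana_ib_gd_destroy_cq does with INVALID_MANA_HANDLE), so error paths can call teardown unconditionally. A trivial stand-alone model of that idiom; the names here are made up, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define INVALID_ID    UINT64_MAX    /* stands in for the driver's sentinels */

struct queue {
    uint64_t id;
    uint64_t region;
};

/* Set sentinels first, so teardown is always safe to call. */
static void queue_init(struct queue *q)
{
    q->id = INVALID_ID;
    q->region = INVALID_ID;
}

static void queue_destroy(struct queue *q)
{
    if (q->region != INVALID_ID)
        printf("releasing region 0x%llx\n", (unsigned long long)q->region);
    /* nothing to do for a queue that never got past init */
}

int main(void)
{
    struct queue q;

    queue_init(&q);
    queue_destroy(&q);    /* safe even though creation "failed" early */
    return 0;
}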
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index b70b13484f..887b09dd86 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
start, iova, length, access_flags);
+ access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EINVAL);
@@ -135,7 +136,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
- "create_dma_region ret %d gdma_region %llx\n", err,
+ "created dma region for user-mr 0x%llx\n",
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 6e7627745c..ba13c5abf8 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -15,15 +15,13 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
struct mana_port_context *mpc = netdev_priv(ndev);
struct mana_cfg_rx_steer_req_v2 *req;
struct mana_cfg_rx_steer_resp resp = {};
- mana_handle_t *req_indir_tab;
struct gdma_context *gc;
u32 req_buf_size;
int i, err;
gc = mdev_to_gc(dev);
- req_buf_size =
- sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
+ req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -44,20 +42,20 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
req->rss_enable = true;
req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
- req->indir_tab_offset = sizeof(*req);
+ req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+ indir_tab);
req->update_indir_tab = true;
req->cqe_coalescing_enable = 1;
- req_indir_tab = (mana_handle_t *)(req + 1);
/* The ind table passed to the hardware must have
* MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
* ind_table to MANA_INDIRECT_TABLE_SIZE if required
*/
ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
- req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
+ req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
- req_indir_tab[i]);
+ req->indir_tab[i]);
}
req->update_hashkey = true;
@@ -97,11 +95,9 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
- struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
- struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
unsigned int ind_tbl_size;
@@ -175,13 +171,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail;
}
- gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
- GFP_KERNEL);
- if (!gdma_cq_allocated) {
- ret = -ENOMEM;
- goto fail;
- }
-
qp->port = port;
for (i = 0; i < ind_tbl_size; i++) {
@@ -194,13 +183,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ibcq = ibwq->cq;
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- wq_spec.gdma_region = wq->gdma_region;
+ wq_spec.gdma_region = wq->queue.gdma_region;
wq_spec.queue_size = wq->wq_buf_size;
- cq_spec.gdma_region = cq->gdma_region;
+ cq_spec.gdma_region = cq->queue.gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
+ eq = &mpc->ac->eqs[cq->comp_vector];
cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@@ -212,18 +201,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
}
/* The GDMA regions are now owned by the WQ object */
- wq->gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- wq->id = wq_spec.queue_index;
- cq->id = cq_spec.queue_index;
+ wq->queue.id = wq_spec.queue_index;
+ cq->queue.id = cq_spec.queue_index;
ibdev_dbg(&mdev->ib_dev,
- "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
- ret, wq->rx_object, wq->id, cq->id);
+ "rx_object 0x%llx wq id %llu cq id %llu\n",
+ wq->rx_object, wq->queue.id, cq->queue.id);
- resp.entries[i].cqid = cq->id;
- resp.entries[i].wqid = wq->id;
+ resp.entries[i].cqid = cq->queue.id;
+ resp.entries[i].wqid = wq->queue.id;
mana_ind_table[i] = wq->rx_object;
@@ -231,8 +220,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ret = mana_ib_install_cq_cb(mdev, cq);
if (ret)
goto fail;
-
- gdma_cq_allocated[i] = gc->cq_table[cq->id];
}
resp.num_entries = i;
@@ -252,7 +239,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail;
}
- kfree(gdma_cq_allocated);
kfree(mana_ind_table);
return 0;
@@ -264,13 +250,10 @@ fail:
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- gc->cq_table[cq->id] = NULL;
- kfree(gdma_cq_allocated[i]);
-
+ mana_ib_remove_cq_cb(mdev, cq);
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
}
- kfree(gdma_cq_allocated);
kfree(mana_ind_table);
return ret;
@@ -289,15 +272,12 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
- struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {};
struct mana_ib_create_qp ucmd = {};
- struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
struct net_device *ndev;
- struct ib_umem *umem;
struct mana_eq *eq;
int eq_vec;
u32 port;
@@ -346,56 +326,39 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
ucmd.sq_buf_addr, ucmd.port);
- umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create qp-raw, err %d\n",
- err);
- goto err_free_vport;
- }
- qp->sq_umem = umem;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create qp-raw, %d\n",
- err);
- goto err_release_umem;
+ "Failed to create queue for create qp-raw, err %d\n", err);
+ goto err_free_vport;
}
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, qp->sq_gdma_region);
-
/* Create a WQ on the same port handle used by the Ethernet */
- wq_spec.gdma_region = qp->sq_gdma_region;
+ wq_spec.gdma_region = qp->raw_sq.gdma_region;
wq_spec.queue_size = ucmd.sq_buf_size;
- cq_spec.gdma_region = send_cq->gdma_region;
+ cq_spec.gdma_region = send_cq->queue.gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq_vec = send_cq->comp_vector % gc->max_num_queues;
+ eq_vec = send_cq->comp_vector;
eq = &mpc->ac->eqs[eq_vec];
cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
- &cq_spec, &qp->tx_object);
+ &cq_spec, &qp->qp_handle);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create wq for create raw-qp, err %d\n",
err);
- goto err_destroy_dma_region;
+ goto err_destroy_queue;
}
/* The GDMA regions are now owned by the WQ object */
- qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
- send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
+ send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- qp->sq_id = wq_spec.queue_index;
- send_cq->id = cq_spec.queue_index;
+ qp->raw_sq.id = wq_spec.queue_index;
+ send_cq->queue.id = cq_spec.queue_index;
/* Create CQ table entry */
err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -403,11 +366,11 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto err_destroy_wq_obj;
ibdev_dbg(&mdev->ib_dev,
- "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
- qp->tx_object, qp->sq_id, send_cq->id);
+ "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
+ qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);
- resp.sqid = qp->sq_id;
- resp.cqid = send_cq->id;
+ resp.sqid = qp->raw_sq.id;
+ resp.cqid = send_cq->queue.id;
resp.tx_vp_offset = pd->tx_vp_offset;
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -415,23 +378,19 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev,
"Failed copy udata for create qp-raw, %d\n",
err);
- goto err_release_gdma_cq;
+ goto err_remove_cq_cb;
}
return 0;
-err_release_gdma_cq:
- kfree(gdma_cq);
- gc->cq_table[send_cq->id] = NULL;
+err_remove_cq_cb:
+ mana_ib_remove_cq_cb(mdev, send_cq);
err_destroy_wq_obj:
- mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
-err_destroy_dma_region:
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-
-err_release_umem:
- ib_umem_release(umem);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
err_free_vport:
mana_ib_uncfg_vport(mdev, pd, port);
@@ -505,12 +464,9 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
- mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
- if (qp->sq_umem) {
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
- ib_umem_release(qp->sq_umem);
- }
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
mana_ib_uncfg_vport(mdev, pd, qp->port);
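The steering-request sizing above moves from an open-coded sizeof(*req) + n * sizeof(mana_handle_t) plus a cast past the struct to struct_size() with a proper flexible-array member, which also gets overflow checking from the kernel macro. A small user-space sketch of the same pattern; the struct layout and values are illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Request with a flexible-array member for the indirection table. */
struct steer_req {
    uint32_t num_entries;
    uint32_t indir_tab_offset;
    uint64_t indir_tab[];        /* flexible array member */
};

int main(void)
{
    const uint32_t n = 64;
    /* Equivalent of struct_size(req, indir_tab, n), minus the kernel
     * macro's overflow checking. */
    size_t req_size = sizeof(struct steer_req) + n * sizeof(uint64_t);
    struct steer_req *req = calloc(1, req_size);
    uint32_t i;

    if (!req)
        return 1;

    req->num_entries = n;
    req->indir_tab_offset = offsetof(struct steer_req, indir_tab);
    for (i = 0; i < n; i++)
        req->indir_tab[i] = 0x1000 + i;    /* dummy handles */

    printf("request size %zu bytes, table offset %u\n",
           req_size, req->indir_tab_offset);
    free(req);
    return 0;
}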
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 7c9c699625..f959f4b924 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -13,7 +13,6 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_create_wq ucmd = {};
struct mana_ib_wq *wq;
- struct ib_umem *umem;
int err;
if (udata->inlen < sizeof(ucmd))
@@ -32,39 +31,18 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
- umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
+ err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+ if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create wq, err %d\n", err);
+ "Failed to create queue for create wq, %d\n", err);
goto err_free_wq;
}
- wq->umem = umem;
wq->wqe = init_attr->max_wr;
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
- if (err) {
- ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create wq, %d\n",
- err);
- goto err_release_umem;
- }
-
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, wq->gdma_region);
-
- /* WQ ID is returned at wq_create time, doesn't know the value yet */
-
return &wq->ibwq;
-err_release_umem:
- ib_umem_release(umem);
-
err_free_wq:
kfree(wq);
@@ -86,8 +64,7 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
- mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
- ib_umem_release(wq->umem);
+ mana_ib_destroy_queue(mdev, &wq->queue);
kfree(wq);