Diffstat
-rw-r--r--	drivers/infiniband/hw/mana/main.c | 291
1 file changed, 283 insertions, 8 deletions
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 4524c6b807..2a41135764 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -261,9 +261,7 @@ int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
 	}
 	queue->umem = umem;
 
-	ibdev_dbg(&mdev->ib_dev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, queue->gdma_region);
+	ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);
 
 	return 0;
 free_umem:
@@ -527,11 +525,18 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 			       struct ib_port_immutable *immutable)
 {
-	/*
-	 * This version only support RAW_PACKET
-	 * other values need to be filled for other types
-	 */
+	struct ib_port_attr attr;
+	int err;
+
+	err = ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
 	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+	if (port_num == 1)
+		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 
 	return 0;
 }
@@ -557,7 +562,42 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 int mana_ib_query_port(struct ib_device *ibdev, u32 port,
 		       struct ib_port_attr *props)
 {
-	/* This version doesn't return port properties */
+	struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
+
+	if (!ndev)
+		return -EINVAL;
+
+	memset(props, 0, sizeof(*props));
+	props->max_mtu = IB_MTU_4096;
+	props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
+
+	if (netif_carrier_ok(ndev) && netif_running(ndev)) {
+		props->state = IB_PORT_ACTIVE;
+		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+	} else {
+		props->state = IB_PORT_DOWN;
+		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+	}
+
+	props->active_width = IB_WIDTH_4X;
+	props->active_speed = IB_SPEED_EDR;
+	props->pkey_tbl_len = 1;
+	if (port == 1)
+		props->gid_tbl_len = 16;
+
+	return 0;
+}
+
+enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
+{
+	return IB_LINK_LAYER_ETHERNET;
+}
+
+int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+	if (index != 0)
+		return -EINVAL;
+	*pkey = IB_DEFAULT_PKEY_FULL;
 	return 0;
 }
 
@@ -613,3 +653,238 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 
 	return 0;
 }
+
+int mana_ib_create_eqs(struct mana_ib_dev *mdev)
+{
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct gdma_queue_spec spec = {};
+	int err, i;
+
+	spec.type = GDMA_EQ;
+	spec.monitor_avl_buf = false;
+	spec.queue_size = EQ_SIZE;
+	spec.eq.callback = NULL;
+	spec.eq.context = mdev;
+	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+	spec.eq.msix_index = 0;
+
+	err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+	if (err)
+		return err;
+
+	mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
+			    GFP_KERNEL);
+	if (!mdev->eqs) {
+		err = -ENOMEM;
+		goto destroy_fatal_eq;
+	}
+
+	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
+		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
+		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
+		if (err)
+			goto destroy_eqs;
+	}
+
+	return 0;
+
+destroy_eqs:
+	while (i-- > 0)
+		mana_gd_destroy_queue(gc, mdev->eqs[i]);
+	kfree(mdev->eqs);
+destroy_fatal_eq:
+	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+	return err;
+}
+
+void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
+{
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	int i;
+
+	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+
+	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
+		mana_gd_destroy_queue(gc, mdev->eqs[i]);
+
+	kfree(mdev->eqs);
+}
+
+int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
+{
+	struct mana_rnic_create_adapter_resp resp = {};
+	struct mana_rnic_create_adapter_req req = {};
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
+	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.notify_eq_id = mdev->fatal_err_eq->id;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
+		return err;
+	}
+	mdev->adapter_handle = resp.adapter;
+
+	return 0;
+}
+
+int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
+{
+	struct mana_rnic_destroy_adapter_resp resp = {};
+	struct mana_rnic_destroy_adapter_req req = {};
+	struct gdma_context *gc;
+	int err;
+
+	gc = mdev_to_gc(mdev);
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
+		return err;
+	}
+
+	return 0;
+}
+
+int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
+{
+	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+	struct mana_rnic_config_addr_resp resp = {};
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct mana_rnic_config_addr_req req = {};
+	int err;
+
+	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+		return -EINVAL;
+	}
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.op = ADDR_OP_ADD;
+	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
+{
+	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
+	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
+	struct mana_rnic_config_addr_resp resp = {};
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct mana_rnic_config_addr_req req = {};
+	int err;
+
+	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
+		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
+		return -EINVAL;
+	}
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.op = ADDR_OP_REMOVE;
+	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
+	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
+{
+	struct mana_rnic_config_mac_addr_resp resp = {};
+	struct mana_rnic_config_mac_addr_req req = {};
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.op = op;
+	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
+		return err;
+	}
+
+	return 0;
+}
+
+int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
+{
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct mana_rnic_create_cq_resp resp = {};
+	struct mana_rnic_create_cq_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.gdma_region = cq->queue.gdma_region;
+	req.eq_id = mdev->eqs[cq->comp_vector]->id;
+	req.doorbell_page = doorbell;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
+		return err;
+	}
+
+	cq->queue.id = resp.cq_id;
+	cq->cq_handle = resp.cq_handle;
+	/* The GDMA region is now owned by the CQ handle */
+	cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+
+	return 0;
+}
+
+int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct mana_rnic_destroy_cq_resp resp = {};
+	struct mana_rnic_destroy_cq_req req = {};
+	int err;
+
+	if (cq->cq_handle == INVALID_MANA_HANDLE)
+		return 0;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.cq_handle = cq->cq_handle;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
+		return err;
+	}
+
+	return 0;
+}
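
Note on byte ordering: both the GID and MAC configuration requests above pass addresses through copy_in_reverse(), which this file uses but does not define (it lives in the driver's mana_ib.h header). A minimal sketch of a byte-reversing copy consistent with these call sites, written as standalone C purely for illustration:

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Sketch only, assuming the semantics implied by the call sites above:
	 * the device expects address bytes in reverse order, so dst[0]
	 * receives src[size - 1] and so on. The in-tree helper uses kernel
	 * types (u8/u32) rather than the stdint types shown here.
	 */
	static void copy_in_reverse(uint8_t *dst, const uint8_t *src, size_t size)
	{
		size_t i;

		for (i = 0; i < size; i++)
			dst[size - 1 - i] = src[i];
	}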
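The new verbs added by this patch (mana_ib_query_port(), mana_ib_query_pkey(), mana_ib_get_link_layer(), and the extended mana_ib_get_port_immutable()) only take effect once they are registered with the RDMA core; that registration lives in the driver's ib_device_ops table in device.c, outside this file. A hedged sketch of what that wiring typically looks like (the member names are standard struct ib_device_ops callbacks; the rest of the table is assumed and elided):

	/*
	 * Illustrative only: the real table is in
	 * drivers/infiniband/hw/mana/device.c and carries many more callbacks.
	 */
	static const struct ib_device_ops mana_ib_dev_ops = {
		.get_link_layer = mana_ib_get_link_layer,
		.get_port_immutable = mana_ib_get_port_immutable,
		.query_pkey = mana_ib_query_pkey,
		.query_port = mana_ib_query_port,
		/* ... remaining callbacks unchanged ... */
	};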