Diffstat (limited to 'debian/patches/features/all/ethernet-microsoft/0012-net-mana-Define-data-structures-for-protection-domai.patch')
-rw-r--r--  debian/patches/features/all/ethernet-microsoft/0012-net-mana-Define-data-structures-for-protection-domai.patch  353
1 file changed, 353 insertions, 0 deletions
diff --git a/debian/patches/features/all/ethernet-microsoft/0012-net-mana-Define-data-structures-for-protection-domai.patch b/debian/patches/features/all/ethernet-microsoft/0012-net-mana-Define-data-structures-for-protection-domai.patch
new file mode 100644
index 000000000..fb9a2a84d
--- /dev/null
+++ b/debian/patches/features/all/ethernet-microsoft/0012-net-mana-Define-data-structures-for-protection-domai.patch
@@ -0,0 +1,353 @@
+From b6589e00fed6f5a53bd4ffd172b7e4d2d300447c Mon Sep 17 00:00:00 2001
+From: Ajay Sharma <sharmaajay@microsoft.com>
+Date: Thu, 3 Nov 2022 12:16:29 -0700
+Subject: [PATCH 12/23] net: mana: Define data structures for protection domain
+ and memory registration
+
+The MANA hardware supports protection domains and memory registration for
+use in an RDMA environment. Add those definitions and expose them for use
+by the RDMA driver.
+
+Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
+Signed-off-by: Long Li <longli@microsoft.com>
+Link: https://lore.kernel.org/r/1667502990-2559-12-git-send-email-longli@linuxonhyperv.com
+Reviewed-by: Dexuan Cui <decui@microsoft.com>
+Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+(cherry picked from commit 28c66cfa45388af1126985d1114e0ed762eb2abd)
+Signed-off-by: Bastian Blank <waldi@debian.org>
+---
+ .../net/ethernet/microsoft/mana/gdma_main.c | 27 ++--
+ drivers/net/ethernet/microsoft/mana/mana_en.c | 18 +--
+ include/net/mana/gdma.h | 121 +++++++++++++++++-
+ 3 files changed, 143 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index b114c31d70ba..690691e3e86c 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
+ req.type = queue->type;
+ req.pdid = queue->gdma_dev->pdid;
+ req.doolbell_id = queue->gdma_dev->doorbell;
+- req.gdma_region = queue->mem_info.gdma_region;
++ req.gdma_region = queue->mem_info.dma_region_handle;
+ req.queue_size = queue->queue_size;
+ req.log2_throttle_limit = queue->eq.log2_throttle_limit;
+ req.eq_pci_msix_index = queue->eq.msix_index;
+@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
+
+ queue->id = resp.queue_index;
+ queue->eq.disable_needed = true;
+- queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
++ queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+ return 0;
+ }
+
+@@ -671,24 +671,30 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
+ return err;
+ }
+
+-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
++int mana_gd_destroy_dma_region(struct gdma_context *gc,
++ gdma_obj_handle_t dma_region_handle)
+ {
+ struct gdma_destroy_dma_region_req req = {};
+ struct gdma_general_resp resp = {};
+ int err;
+
+- if (gdma_region == GDMA_INVALID_DMA_REGION)
+- return;
++ if (dma_region_handle == GDMA_INVALID_DMA_REGION)
++ return 0;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
+ sizeof(resp));
+- req.gdma_region = gdma_region;
++ req.dma_region_handle = dma_region_handle;
+
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+- if (err || resp.hdr.status)
++ if (err || resp.hdr.status) {
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
++ return -EPROTO;
++ }
++
++ return 0;
+ }
++EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
+
+ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ struct gdma_mem_info *gmi)
+@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ if (err)
+ goto out;
+
+- if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
++ if (resp.hdr.status ||
++ resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
+ dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
+ resp.hdr.status);
+ err = -EPROTO;
+ goto out;
+ }
+
+- gmi->gdma_region = resp.gdma_region;
++ gmi->dma_region_handle = resp.dma_region_handle;
+ out:
+ kfree(req);
+ return err;
+@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
+ return;
+ }
+
+- mana_gd_destroy_dma_region(gc, gmi->gdma_region);
++ mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
+ mana_gd_free_memory(gmi);
+ kfree(queue);
+ }
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index 958e55c936b5..9bce13714b25 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -1529,10 +1529,10 @@ static int mana_create_txq(struct mana_port_context *apc,
+ memset(&wq_spec, 0, sizeof(wq_spec));
+ memset(&cq_spec, 0, sizeof(cq_spec));
+
+- wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
++ wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
+ wq_spec.queue_size = txq->gdma_sq->queue_size;
+
+- cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
++ cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
+ cq_spec.queue_size = cq->gdma_cq->queue_size;
+ cq_spec.modr_ctx_id = 0;
+ cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
+@@ -1547,8 +1547,10 @@ static int mana_create_txq(struct mana_port_context *apc,
+ txq->gdma_sq->id = wq_spec.queue_index;
+ cq->gdma_cq->id = cq_spec.queue_index;
+
+- txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+- cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
++ txq->gdma_sq->mem_info.dma_region_handle =
++ GDMA_INVALID_DMA_REGION;
++ cq->gdma_cq->mem_info.dma_region_handle =
++ GDMA_INVALID_DMA_REGION;
+
+ txq->gdma_txq_id = txq->gdma_sq->id;
+
+@@ -1759,10 +1761,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
+
+ memset(&wq_spec, 0, sizeof(wq_spec));
+ memset(&cq_spec, 0, sizeof(cq_spec));
+- wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
++ wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
+ wq_spec.queue_size = rxq->gdma_rq->queue_size;
+
+- cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
++ cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
+ cq_spec.queue_size = cq->gdma_cq->queue_size;
+ cq_spec.modr_ctx_id = 0;
+ cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
+@@ -1775,8 +1777,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
+ rxq->gdma_rq->id = wq_spec.queue_index;
+ cq->gdma_cq->id = cq_spec.queue_index;
+
+- rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+- cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
++ rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
++ cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+
+ rxq->gdma_id = rxq->gdma_rq->id;
+ cq->gdma_id = cq->gdma_cq->id;
+diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
+index 202ac405ab59..aabc7cea8a49 100644
+--- a/include/net/mana/gdma.h
++++ b/include/net/mana/gdma.h
+@@ -27,6 +27,10 @@ enum gdma_request_type {
+ GDMA_CREATE_DMA_REGION = 25,
+ GDMA_DMA_REGION_ADD_PAGES = 26,
+ GDMA_DESTROY_DMA_REGION = 27,
++ GDMA_CREATE_PD = 29,
++ GDMA_DESTROY_PD = 30,
++ GDMA_CREATE_MR = 31,
++ GDMA_DESTROY_MR = 32,
+ };
+
+ enum gdma_queue_type {
+@@ -57,6 +61,8 @@ enum {
+ GDMA_DEVICE_MANA = 2,
+ };
+
++typedef u64 gdma_obj_handle_t;
++
+ struct gdma_resource {
+ /* Protect the bitmap */
+ spinlock_t lock;
+@@ -190,7 +196,7 @@ struct gdma_mem_info {
+ u64 length;
+
+ /* Allocated by the PF driver */
+- u64 gdma_region;
++ gdma_obj_handle_t dma_region_handle;
+ };
+
+ #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
+@@ -605,7 +611,7 @@ struct gdma_create_queue_req {
+ u32 reserved1;
+ u32 pdid;
+ u32 doolbell_id;
+- u64 gdma_region;
++ gdma_obj_handle_t gdma_region;
+ u32 reserved2;
+ u32 queue_size;
+ u32 log2_throttle_limit;
+@@ -632,6 +638,28 @@ struct gdma_disable_queue_req {
+ u32 alloc_res_id_on_creation;
+ }; /* HW DATA */
+
++enum atb_page_size {
++ ATB_PAGE_SIZE_4K,
++ ATB_PAGE_SIZE_8K,
++ ATB_PAGE_SIZE_16K,
++ ATB_PAGE_SIZE_32K,
++ ATB_PAGE_SIZE_64K,
++ ATB_PAGE_SIZE_128K,
++ ATB_PAGE_SIZE_256K,
++ ATB_PAGE_SIZE_512K,
++ ATB_PAGE_SIZE_1M,
++ ATB_PAGE_SIZE_2M,
++ ATB_PAGE_SIZE_MAX,
++};
++
++enum gdma_mr_access_flags {
++ GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
++ GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
++ GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
++ GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
++ GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
++};
++
+ /* GDMA_CREATE_DMA_REGION */
+ struct gdma_create_dma_region_req {
+ struct gdma_req_hdr hdr;
+@@ -658,14 +686,14 @@ struct gdma_create_dma_region_req {
+
+ struct gdma_create_dma_region_resp {
+ struct gdma_resp_hdr hdr;
+- u64 gdma_region;
++ gdma_obj_handle_t dma_region_handle;
+ }; /* HW DATA */
+
+ /* GDMA_DMA_REGION_ADD_PAGES */
+ struct gdma_dma_region_add_pages_req {
+ struct gdma_req_hdr hdr;
+
+- u64 gdma_region;
++ gdma_obj_handle_t dma_region_handle;
+
+ u32 page_addr_list_len;
+ u32 reserved3;
+@@ -677,9 +705,88 @@ struct gdma_dma_region_add_pages_req {
+ struct gdma_destroy_dma_region_req {
+ struct gdma_req_hdr hdr;
+
+- u64 gdma_region;
++ gdma_obj_handle_t dma_region_handle;
+ }; /* HW DATA */
+
++enum gdma_pd_flags {
++ GDMA_PD_FLAG_INVALID = 0,
++};
++
++struct gdma_create_pd_req {
++ struct gdma_req_hdr hdr;
++ enum gdma_pd_flags flags;
++ u32 reserved;
++};/* HW DATA */
++
++struct gdma_create_pd_resp {
++ struct gdma_resp_hdr hdr;
++ gdma_obj_handle_t pd_handle;
++ u32 pd_id;
++ u32 reserved;
++};/* HW DATA */
++
++struct gdma_destroy_pd_req {
++ struct gdma_req_hdr hdr;
++ gdma_obj_handle_t pd_handle;
++};/* HW DATA */
++
++struct gdma_destory_pd_resp {
++ struct gdma_resp_hdr hdr;
++};/* HW DATA */
++
++enum gdma_mr_type {
++ /* Guest Virtual Address - MRs of this type allow access
++ * to memory mapped by PTEs associated with this MR using a virtual
++ * address that is set up in the MST
++ */
++ GDMA_MR_TYPE_GVA = 2,
++};
++
++struct gdma_create_mr_params {
++ gdma_obj_handle_t pd_handle;
++ enum gdma_mr_type mr_type;
++ union {
++ struct {
++ gdma_obj_handle_t dma_region_handle;
++ u64 virtual_address;
++ enum gdma_mr_access_flags access_flags;
++ } gva;
++ };
++};
++
++struct gdma_create_mr_request {
++ struct gdma_req_hdr hdr;
++ gdma_obj_handle_t pd_handle;
++ enum gdma_mr_type mr_type;
++ u32 reserved_1;
++
++ union {
++ struct {
++ gdma_obj_handle_t dma_region_handle;
++ u64 virtual_address;
++ enum gdma_mr_access_flags access_flags;
++ } gva;
++
++ };
++ u32 reserved_2;
++};/* HW DATA */
++
++struct gdma_create_mr_response {
++ struct gdma_resp_hdr hdr;
++ gdma_obj_handle_t mr_handle;
++ u32 lkey;
++ u32 rkey;
++};/* HW DATA */
++
++struct gdma_destroy_mr_request {
++ struct gdma_req_hdr hdr;
++ gdma_obj_handle_t mr_handle;
++};/* HW DATA */
++
++struct gdma_destroy_mr_response {
++ struct gdma_resp_hdr hdr;
++};/* HW DATA */
++
+ int mana_gd_verify_vf_version(struct pci_dev *pdev);
+
+ int mana_gd_register_device(struct gdma_dev *gd);
+@@ -706,4 +813,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi);
+
+ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
+ u32 resp_len, void *resp);
++
++int mana_gd_destroy_dma_region(struct gdma_context *gc,
++ gdma_obj_handle_t dma_region_handle);
++
+ #endif /* _GDMA_H */
+--
+2.40.1
+
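The sketch below is not part of the patch; it is a minimal illustration of how an RDMA driver might use the structures this patch adds to issue a GDMA_CREATE_MR request for a GVA-type memory region. The helper name mana_gd_create_mr_sketch and its error handling are assumptions; only the request/response layouts, GDMA_CREATE_MR, GDMA_MR_TYPE_GVA, mana_gd_init_req_hdr() and mana_gd_send_request() come from the driver code shown above.

/* Hypothetical helper, not part of this patch: build and send a
 * GDMA_CREATE_MR request for a GVA-type memory region.
 */
static int mana_gd_create_mr_sketch(struct gdma_context *gc,
				    const struct gdma_create_mr_params *params,
				    gdma_obj_handle_t *mr_handle)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = params->pd_handle;
	req.mr_type = params->mr_type;

	switch (params->mr_type) {
	case GDMA_MR_TYPE_GVA:
		/* Register the pages of an existing DMA region at a
		 * guest virtual address set up in the MST.
		 */
		req.gva.dma_region_handle = params->gva.dma_region_handle;
		req.gva.virtual_address = params->gva.virtual_address;
		req.gva.access_flags = params->gva.access_flags;
		break;
	default:
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
				   &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	*mr_handle = resp.mr_handle;
	return 0;
}

On teardown, mana_gd_destroy_dma_region() is now exported in the NET_MANA symbol namespace and returns int instead of void, so a caller such as the sketch above can propagate -EPROTO when the PF driver rejects the destroy request instead of silently dropping the failure.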