author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/pci/endpoint
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
-rw-r--r--  drivers/pci/endpoint/Kconfig                     33
-rw-r--r--  drivers/pci/endpoint/Makefile                     8
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig           49
-rw-r--r--  drivers/pci/endpoint/functions/Makefile           9
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c    716
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c   2149
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c  1021
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c  1468
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c               767
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c             931
-rw-r--r--  drivers/pci/endpoint/pci-epc-mem.c              272
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c             538
12 files changed, 7961 insertions, 0 deletions
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
new file mode 100644
index 0000000000..17bbdc9bbd
--- /dev/null
+++ b/drivers/pci/endpoint/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# PCI Endpoint Support
+#
+
+menu "PCI Endpoint"
+
+config PCI_ENDPOINT
+ bool "PCI Endpoint Support"
+ depends on HAVE_PCI
+ help
+ Enable this configuration option to support a configurable PCI
+ endpoint. This should be enabled if the platform has a PCI
+ controller that can operate in endpoint mode.
+
+ Enabling this option will build the endpoint library, which
+ includes the endpoint controller library and the endpoint
+ function library.
+
+ If in doubt, say "N" to disable Endpoint support.
+
+config PCI_ENDPOINT_CONFIGFS
+ bool "PCI Endpoint Configfs Support"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ This will enable the configfs entry that can be used to
+ configure the endpoint function and to bind the function
+ to an endpoint controller.
+
+source "drivers/pci/endpoint/functions/Kconfig"
+
+endmenu
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
new file mode 100644
index 0000000000..95b2fe47e3
--- /dev/null
+++ b/drivers/pci/endpoint/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for PCI Endpoint Support
+#
+
+obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
+obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
+ pci-epc-mem.o functions/
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
new file mode 100644
index 0000000000..0c9cea0698
--- /dev/null
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# PCI Endpoint Functions
+#
+
+config PCI_EPF_TEST
+ tristate "PCI Endpoint Test driver"
+ depends on PCI_ENDPOINT
+ select CRC32
+ help
+ Enable this configuration option to build the test driver
+ for PCI Endpoint.
+
+ If in doubt, say "N" to disable Endpoint test driver.
+
+config PCI_EPF_NTB
+ tristate "PCI Endpoint NTB driver"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+ Bridge (NTB) driver for PCI Endpoint. The NTB driver implements
+ NTB controller functionality using multiple PCIe endpoint
+ instances. It can support NTB endpoint function devices created
+ using a device tree.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_VNTB
+ tristate "PCI Endpoint Virtual NTB driver"
+ depends on PCI_ENDPOINT
+ depends on NTB
+ select CONFIGFS_FS
+ help
+ Select this configuration option to enable the Non-Transparent
+ Bridge (NTB) driver for PCIe Endpoint. The NTB driver implements
+ an NTB between a PCI Root Port and a PCIe Endpoint.
+
+ If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_MHI
+ tristate "PCI Endpoint driver for MHI bus"
+ depends on PCI_ENDPOINT && MHI_BUS_EP
+ help
+ Enable this configuration option to build the PCI Endpoint
+ driver for the Modem Host Interface (MHI) bus in Qualcomm Endpoint
+ devices such as SDX55.
+
+ If in doubt, say "N" to disable Endpoint driver for MHI bus.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
new file mode 100644
index 0000000000..696473fce5
--- /dev/null
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for PCI Endpoint Functions
+#
+
+obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
+obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o
+obj-$(CONFIG_PCI_EPF_MHI) += pci-epf-mhi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
new file mode 100644
index 0000000000..6dc918a8a0
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI EPF driver for MHI Endpoint devices
+ *
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/mhi_ep.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#define MHI_VERSION_1_0 0x01000000
+
+#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)
+
+/* Platform specific flags */
+#define MHI_EPF_USE_DMA BIT(0)
+
+struct pci_epf_mhi_ep_info {
+ const struct mhi_ep_cntrl_config *config;
+ struct pci_epf_header *epf_header;
+ enum pci_barno bar_num;
+ u32 epf_flags;
+ u32 msi_count;
+ u32 mru;
+ u32 flags;
+};
+
+#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = direction, \
+ }
+
+#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \
+ MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)
+
+#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \
+ MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)
+
+static const struct mhi_ep_channel_config mhi_v1_channels[] = {
+ MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
+ MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
+};
+
+static const struct mhi_ep_cntrl_config mhi_v1_config = {
+ .max_channels = 128,
+ .num_channels = ARRAY_SIZE(mhi_v1_channels),
+ .ch_cfg = mhi_v1_channels,
+ .mhi_version = MHI_VERSION_1_0,
+};
+
+static struct pci_epf_header sdx55_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
+ .subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sdx55_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sdx55_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+};
+
+static struct pci_epf_header sm8450_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sm8450_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sm8450_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
+};
+
+struct pci_epf_mhi {
+ const struct pci_epc_features *epc_features;
+ const struct pci_epf_mhi_ep_info *info;
+ struct mhi_ep_cntrl mhi_cntrl;
+ struct pci_epf *epf;
+ struct mutex lock;
+ void __iomem *mmio;
+ resource_size_t mmio_phys;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
+ u32 mmio_size;
+ int irq;
+};
+
+static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
+{
+ return addr & (epf_mhi->epc_features->align - 1);
+}
+
+static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t *paddr, void __iomem **vaddr,
+ size_t offset, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+ int ret;
+
+ *vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
+ if (!*vaddr)
+ return -ENOMEM;
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
+ pci_addr - offset, size + offset);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
+ return ret;
+ }
+
+ *paddr = *paddr + offset;
+ *vaddr = *vaddr + offset;
+
+ return 0;
+}
+
+static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t *paddr, void __iomem **vaddr,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ size_t offset = get_align_offset(epf_mhi, pci_addr);
+
+ return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
+ offset, size);
+}
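
The alloc/map helpers above carry an alignment offset because the outbound window must start on an epc_features->align boundary. A standalone sketch of that bookkeeping, with assumed values (align is hardware-specific):

/*
 * Illustrative only: the offset math used by get_align_offset() and
 * __pci_epf_mhi_alloc_map(). The window is mapped from the aligned base
 * below the requested PCI address, and the caller uses the returned
 * physical/virtual addresses at +offset.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t align    = 0x1000;	/* assumed epc_features->align (4K) */
	uint64_t pci_addr = 0x80001234;	/* hypothetical host buffer address */
	uint64_t size     = 0x600;

	uint64_t offset   = pci_addr & (align - 1);	/* 0x234 */
	uint64_t map_base = pci_addr - offset;		/* 0x80001000, aligned */
	uint64_t map_size = size + offset;		/* 0x834, covers the buffer */

	printf("map %#llx (+%#llx bytes), data at +%#llx\n",
	       (unsigned long long)map_base, (unsigned long long)map_size,
	       (unsigned long long)offset);
	return 0;
}
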
+
+static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
+ u64 pci_addr, phys_addr_t paddr,
+ void __iomem *vaddr, size_t offset,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
+ pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
+ size + offset);
+}
+
+static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
+ phys_addr_t paddr, void __iomem *vaddr,
+ size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ size_t offset = get_align_offset(epf_mhi, pci_addr);
+
+ __pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
+ size);
+}
+
+static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ /*
+ * MHI supplies 0-based MSI vectors but the API expects the vector
+ * number to start from 1, so increment the vector by 1.
+ */
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
+ vector + 1);
+}
+
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
+ if (ret) {
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+ memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);
+
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ return 0;
+}
+
+static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
+ if (ret) {
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+ memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
+
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ return 0;
+}
+
+static void pci_epf_mhi_dma_callback(void *param)
+{
+ complete(param);
+}
+
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct dma_chan *chan = epf_mhi->dma_chan_rx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t dst_addr;
+ int ret;
+
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap:
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct dma_chan *chan = epf_mhi->dma_chan_tx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t src_addr;
+ int ret;
+
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap:
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev && filter->dma_mask &
+ caps.directions;
+}
+
+static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
+{
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct device *dev = &epf_mhi->epf->dev;
+ struct epf_dma_filter filter;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ filter.dev = dma_dev;
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
+ &filter);
+ if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
+ dev_err(dev, "Failed to request tx channel\n");
+ return -ENODEV;
+ }
+
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+ epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
+ &filter);
+ if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
+ dev_err(dev, "Failed to request rx channel\n");
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ epf_mhi->dma_chan_tx = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
+{
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ dma_release_channel(epf_mhi->dma_chan_rx);
+ epf_mhi->dma_chan_tx = NULL;
+ epf_mhi->dma_chan_rx = NULL;
+}
+
+static int pci_epf_mhi_core_init(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ epf_bar->phys_addr = epf_mhi->mmio_phys;
+ epf_bar->size = epf_mhi->mmio_size;
+ epf_bar->barno = info->bar_num;
+ epf_bar->flags = info->epf_flags;
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Failed to set BAR: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ order_base_2(info->msi_count));
+ if (ret) {
+ dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
+ epf->header);
+ if (ret) {
+ dev_err(dev, "Failed to set Configuration header: %d\n", ret);
+ return ret;
+ }
+
+ epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (!epf_mhi->epc_features)
+ return -ENODATA;
+
+ return 0;
+}
+
+static int pci_epf_mhi_link_up(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (info->flags & MHI_EPF_USE_DMA) {
+ ret = pci_epf_mhi_dma_init(epf_mhi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
+ mhi_cntrl->mmio = epf_mhi->mmio;
+ mhi_cntrl->irq = epf_mhi->irq;
+ mhi_cntrl->mru = info->mru;
+
+ /* Assign the struct dev of the PCI EP as the MHI controller device */
+ mhi_cntrl->cntrl_dev = epc->dev.parent;
+ mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
+ mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
+ mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+ if (info->flags & MHI_EPF_USE_DMA) {
+ mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
+ mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
+ } else {
+ mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
+ mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+ }
+
+ /* Register the MHI EP controller */
+ ret = mhi_ep_register_controller(mhi_cntrl, info->config);
+ if (ret) {
+ dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pci_epf_mhi_link_down(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+
+ if (mhi_cntrl->mhi_dev) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+
+ return 0;
+}
+
+static int pci_epf_mhi_bme(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ /*
+ * Power up the MHI EP stack if the link is up and the stack is in the
+ * power-down state.
+ */
+ if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
+ ret = mhi_ep_power_up(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_mhi_bind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ struct platform_device *pdev = to_platform_device(epc->dev.parent);
+ struct resource *res;
+ int ret;
+
+ /* Get MMIO base address from Endpoint controller */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res)
+ return -ENODEV;
+
+ epf_mhi->mmio_phys = res->start;
+ epf_mhi->mmio_size = resource_size(res);
+
+ epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
+ if (!epf_mhi->mmio)
+ return -ENOMEM;
+
+ ret = platform_get_irq_byname(pdev, "doorbell");
+ if (ret < 0) {
+ iounmap(epf_mhi->mmio);
+ return ret;
+ }
+
+ epf_mhi->irq = ret;
+
+ return 0;
+}
+
+static void pci_epf_mhi_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ /*
+ * Forcefully power down the MHI EP stack. The only way to bring the MHI
+ * EP stack back to a working state after a subsequent bind is by getting
+ * BME (Bus Master Enable) from the host.
+ */
+ if (mhi_cntrl->mhi_dev) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+
+ iounmap(epf_mhi->mmio);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
+static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
+ .core_init = pci_epf_mhi_core_init,
+ .link_up = pci_epf_mhi_link_up,
+ .link_down = pci_epf_mhi_link_down,
+ .bme = pci_epf_mhi_bme,
+};
+
+static int pci_epf_mhi_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct pci_epf_mhi_ep_info *info =
+ (struct pci_epf_mhi_ep_info *)id->driver_data;
+ struct pci_epf_mhi *epf_mhi;
+ struct device *dev = &epf->dev;
+
+ epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
+ if (!epf_mhi)
+ return -ENOMEM;
+
+ epf->header = info->epf_header;
+ epf_mhi->info = info;
+ epf_mhi->epf = epf;
+
+ epf->event_ops = &pci_epf_mhi_event_ops;
+
+ mutex_init(&epf_mhi->lock);
+
+ epf_set_drvdata(epf, epf_mhi);
+
+ return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
+ { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
+ { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
+ {},
+};
+
+static struct pci_epf_ops pci_epf_mhi_ops = {
+ .unbind = pci_epf_mhi_unbind,
+ .bind = pci_epf_mhi_bind,
+};
+
+static struct pci_epf_driver pci_epf_mhi_driver = {
+ .driver.name = "pci_epf_mhi",
+ .probe = pci_epf_mhi_probe,
+ .id_table = pci_epf_mhi_ids,
+ .ops = &pci_epf_mhi_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_mhi_init(void)
+{
+ return pci_epf_register_driver(&pci_epf_mhi_driver);
+}
+module_init(pci_epf_mhi_init);
+
+static void __exit pci_epf_mhi_exit(void)
+{
+ pci_epf_unregister_driver(&pci_epf_mhi_driver);
+}
+module_exit(pci_epf_mhi_exit);
+
+MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
new file mode 100644
index 0000000000..9aac2c6f3b
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -0,0 +1,2149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * The PCI NTB function driver configures the SoC with multiple PCIe Endpoint
+ * (EP) controller instances (see diagram below) in such a way that
+ * transactions from one EP controller are routed to the other EP controller.
+ * Once the PCI NTB function driver configures the SoC with multiple EP
+ * instances, HOST1 and HOST2 can communicate with each other using the SoC as
+ * a bridge.
+ *
+ * +-------------+ +-------------+
+ * | | | |
+ * | HOST1 | | HOST2 |
+ * | | | |
+ * +------^------+ +------^------+
+ * | |
+ * | |
+ * +---------|-------------------------------------------------|---------+
+ * | +------v------+ +------v------+ |
+ * | | | | | |
+ * | | EP | | EP | |
+ * | | CONTROLLER1 | | CONTROLLER2 | |
+ * | | <-----------------------------------> | |
+ * | | | | | |
+ * | | | | | |
+ * | | | SoC With Multiple EP Instances | | |
+ * | | | (Configured using NTB Function) | | |
+ * | +-------------+ +-------------+ |
+ * +---------------------------------------------------------------------+
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_PEER_SPAD,
+ BAR_DB_MW1,
+ BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+};
+
+struct epf_ntb {
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ struct pci_epf *epf;
+ u64 mws_size[MAX_MW];
+ struct config_group group;
+ struct epf_ntb_epc *epc[2];
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+
+struct epf_ntb_epc {
+ u8 func_no;
+ u8 vfunc_no;
+ bool linkup;
+ bool is_msix;
+ int msix_bar;
+ u32 spad_size;
+ struct pci_epc *epc;
+ struct epf_ntb *epf_ntb;
+ void __iomem *mw_addr[6];
+ size_t msix_table_offset;
+ struct epf_ntb_ctrl *reg;
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno epf_ntb_bar[6];
+ struct delayed_work cmd_handler;
+ enum pci_epc_interface_type type;
+ const struct pci_epc_features *epc_features;
+};
+
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 mw1_offset;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to both the hosts
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function in HOST1 and the NTB function in HOST2 invoke
+ * ntb_link_enable(), this NTB function driver will trigger a link event to
+ * the NTB client in both hosts.
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ enum pci_epc_interface_type type;
+ enum pci_epc_irq_type irq_type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ struct pci_epc *epc;
+ u8 func_no, vfunc_no;
+ bool is_msix;
+ int ret;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ is_msix = ntb_epc->is_msix;
+ ctrl = ntb_epc->reg;
+ if (link_up)
+ ctrl->link_status |= LINK_STATUS_UP;
+ else
+ ctrl->link_status &= ~LINK_STATUS_UP;
+ irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+ ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to raise Link Up IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for one host
+ * to access the memory window of other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | +----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see MW1), i.e., maps
+ * the OB address space of the memory window to the PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address in the outbound address space
+ * 2) Address in the PCI Address space
+ * 3) Size of the address region to be mapped
+ *
+ * The address in the outbound address space (for MW1, MW2, MW3 and MW4) is
+ * stored in the epf_bar corresponding to BAR_DB_MW1 for MW1, and in BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for the rest of the BARs of the epf_ntb_epc that is
+ * connected to HOST1. This is populated in epf_ntb_alloc_peer_mem() in this
+ * driver.
+ *
+ * The address and size of the PCI address region that has to be mapped would
+ * be provided by HOST2 in ctrl->addr and ctrl->size of epf_ntb_epc that is
+ * connected to HOST2.
+ *
+ * Please note Memory window 1 (MW1) and the Doorbell registers together will
+ * be mapped to a single BAR (BAR2) above for 32-bit BARs. The exact BAR
+ * that's used for a Memory window (MW) can be obtained from
+ * epf_ntb_bar[BAR_DB_MW1], epf_ntb_bar[BAR_MW2], epf_ntb_bar[BAR_MW3] and
+ * epf_ntb_bar[BAR_MW4].
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ u64 addr, size;
+ int ret = 0;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ addr = ctrl->addr;
+ size = ctrl->size;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+
+ if (size > ntb->mws_size[mw]) {
+ dev_err(&epc->dev,
+ "%s intf: MW: %d Req Sz:%llxx > Supported Sz:%llx\n",
+ pci_epc_interface_string(type), mw, size,
+ ntb->mws_size[mw]);
+ ret = -EINVAL;
+ goto err_invalid_size;
+ }
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&epc->dev,
+ "%s intf: Failed to map memory window %d address\n",
+ pci_epc_interface_string(type), mw);
+
+err_invalid_size:
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u32 mw)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ struct epf_ntb_ctrl *ctrl;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ phys_addr = peer_epf_bar->phys_addr;
+ ctrl = ntb_epc->reg;
+ if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+ phys_addr += ctrl->mw1_offset;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
+}
+
+/**
+ * epf_ntb_configure_msi() - Map OB address space to MSI address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +---+-------> MSI ADDRESS |
+ *+-----------------+ | +----------------+ | +-----------------+
+ *| BAR1 | | | Doorbell 2 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +---+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR3 | | | Doorbell 4 +---+ | |
+ *+-----------------+ | |----------------+ | |
+ *| BAR4 | | | | | |
+ *+-----------------+ | | MW1 | | |
+ *| BAR5 | | | | | |
+ *+-----------------+ +----->-----------------+ | |
+ * EP CONTROLLER 1 | | | |
+ * | | | |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4), i.e., maps the OB address space
+ * corresponding to the doorbell to the MSI address in the PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address reserved for doorbell in the outbound address space
+ * 2) MSI address in the PCI address space
+ * 3) Number of MSI interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * pci_epc_map_msi_irq() takes the MSI address from MSI capability register
+ * and maps the OB address (obtained in epf_ntb_alloc_peer_mem()) to the MSI
+ * address.
+ *
+ * epf_ntb_configure_msi() also stores the MSI data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msi(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type, u16 db_count)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ u32 db_entry_size, db_data, db_offset;
+ struct pci_epf_bar *peer_epf_bar;
+ struct epf_ntb_ctrl *peer_ctrl;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ peer_ctrl = peer_ntb_epc->reg;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
+ db_entry_size, &db_data, &db_offset);
+ if (ret) {
+ dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ for (i = 0; i < db_count; i++) {
+ peer_ctrl->db_data[i] = db_data | i;
+ peer_ctrl->db_offset[i] = db_offset;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_msix() - Map OB address space to MSI-X address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+ +----->+----------------+-----------+-----------------+
+ *| BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 |
+ *+-----------------+ | +----------------+ +-----------------+
+ *| BAR1 | | | Doorbell 2 +---------+ | |
+ *+-----------------+----+ +----------------+ | | |
+ *| BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ *+-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 |
+ *| BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ *+-----------------+ | |----------------+ | | | |
+ *| BAR4 | | | | | | +-----------------+
+ *+-----------------+ | | MW1 + | +-->+ MSI-X ADDRESS 3||
+ *| BAR5 | | | | | +-----------------+
+ *+-----------------+ +----->-----------------+ | | |
+ * EP CONTROLLER 1 | | | +-----------------+
+ * | | +---->+ MSI-X ADDRESS 4 |
+ * +----------------+ +-----------------+
+ * (A) EP CONTROLLER 2 | |
+ * (OB SPACE) | |
+ * | MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see Doorbell 1,
+ * Doorbell 2, Doorbell 3, Doorbell 4), i.e., maps the OB address space
+ * corresponding to the doorbell to the MSI-X address in the PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address reserved for doorbell in the outbound address space
+ * 2) MSI-X address in the PCIe Address space
+ * 3) Number of MSI-X interrupts that have to be configured
+ *
+ * The address in the outbound address space (for the Doorbell) is stored in
+ * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to
+ * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along
+ * with address for MW1.
+ *
+ * The MSI-X address is in the MSI-X table of EP CONTROLLER 2 and the doorbell
+ * count is in ctrl->argument of the epf_ntb_epc that is connected to HOST2.
+ * The MSI-X table is memory mapped to ntb_epc->msix_bar at the offset given by
+ * ntb_epc->msix_table_offset. From this, epf_ntb_configure_msix() gets the
+ * MSI-X address and data.
+ *
+ * epf_ntb_configure_msix() also stores the MSI-X data to raise each interrupt
+ * in db_data of the peer's control region. This helps the peer to raise
+ * doorbell of the other host by writing db_data to the BAR corresponding to
+ * BAR_DB_MW1.
+ */
+static int epf_ntb_configure_msix(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct epf_ntb_ctrl *peer_ctrl;
+ u32 db_entry_size, msg_data;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ size_t align;
+ u64 msg_addr;
+ int ret, i;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ epf_bar = &ntb_epc->epf_bar[ntb_epc->msix_bar];
+ msix_tbl = epf_bar->addr + ntb_epc->msix_table_offset;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ peer_ctrl = peer_ntb_epc->reg;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ db_entry_size = peer_ctrl->db_entry_size;
+
+ for (i = 0; i < db_count; i++) {
+ msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
+ msg_data = msix_tbl[i].msg_data;
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
+ db_entry_size);
+ if (ret) {
+ dev_err(&epc->dev,
+ "%s intf: Failed to configure MSI-X IRQ\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ phys_addr = phys_addr + db_entry_size;
+ peer_ctrl->db_data[i] = msg_data;
+ peer_ctrl->db_offset[i] = msix_tbl[i].msg_addr & (align - 1);
+ }
+ ntb_epc->is_msix = true;
+
+ return 0;
+}
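
The db_data/db_offset values stored above (for MSI and MSI-X alike) are what the peer host later uses to ring a doorbell: it writes db_data[n] into the BAR_DB_MW1 region at the slot for doorbell n. A minimal host-side sketch, assuming the control (BAR_CONFIG) and doorbell (BAR_DB_MW1) regions have already been ioremap()ed; the helper below is hypothetical, not the upstream host driver:

/*
 * Hypothetical host-side helper: ring doorbell "db" of the endpoint.
 * Doorbell slots are ctrl->db_entry_size apart; db_offset carries the
 * sub-alignment remainder of the mapped MSI/MSI-X message address.
 */
static void ntb_host_ring_db(struct epf_ntb_ctrl __iomem *ctrl,
			     void __iomem *db_base, u32 db)
{
	u32 entry_size = readl(&ctrl->db_entry_size);
	u32 data = readl(&ctrl->db_data[db]);
	u32 offset = readl(&ctrl->db_offset[db]);

	/* This write lands on the MSI/MSI-X address mapped above */
	writel(data, db_base + db * entry_size + offset);
}
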
+
+/**
+ * epf_ntb_configure_db() - Configure the Outbound Address Space for one host
+ * to ring the doorbell of other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbells that have to be configured
+ * @msix: Indicates whether MSI-X or MSI should be used
+ *
+ * Invokes epf_ntb_configure_msix() or epf_ntb_configure_msi() as required for
+ * one HOST to ring the doorbell of the other HOST.
+ */
+static int epf_ntb_configure_db(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type,
+ u16 db_count, bool msix)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ int ret;
+
+ if (db_count > MAX_DB_COUNT)
+ return -EINVAL;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ if (msix)
+ ret = epf_ntb_configure_msix(ntb, type, db_count);
+ else
+ ret = epf_ntb_configure_msi(ntb, type, db_count);
+
+ if (ret)
+ dev_err(&epc->dev, "%s intf: Failed to configure DB\n",
+ pci_epc_interface_string(type));
+
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_db() - Unmap address in OB address space to MSI/MSI-X
+ * address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Invokes pci_epc_unmap_addr() to unmap the OB address from the MSI/MSI-X
+ * address.
+ */
+static void
+epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar;
+ enum pci_barno peer_barno;
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ ntb_epc = ntb->epc[type];
+ epc = ntb_epc->epc;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+ phys_addr = peer_epf_bar->phys_addr;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host
+ * @work: work_struct for the two epf_ntb_epc (PRIMARY and SECONDARY)
+ *
+ * Workqueue function that gets invoked for the two epf_ntb_epc
+ * periodically (once every 5ms) to see if it has received any commands
+ * from the NTB host. The host can send commands to configure a doorbell,
+ * configure a memory window, or update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ u16 db_count;
+ bool is_msix;
+ int ret;
+
+ ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work);
+ ctrl = ntb_epc->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ type = ntb_epc->type;
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ db_count = argument & DB_COUNT_MASK;
+ is_msix = argument & MSIX_ENABLE;
+ ret = epf_ntb_configure_db(ntb, type, db_count, is_msix);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ epf_ntb_teardown_db(ntb, type);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, type, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, type, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb_epc->linkup = true;
+ if (ntb->epc[PRIMARY_INTERFACE]->linkup &&
+ ntb->epc[SECONDARY_INTERFACE]->linkup) {
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ }
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_DOWN:
+ ntb_epc->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "%s intf UNKNOWN command: %d\n",
+ pci_epc_interface_string(type), command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler,
+ msecs_to_jiffies(5));
+}
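
The handler above implements a simple polled mailbox: the host writes ctrl->argument, then ctrl->command, and waits for the endpoint (which polls every 5ms) to post ctrl->command_status. A host-side sketch of that sequence, assuming BAR_CONFIG has been ioremap()ed; the helper is hypothetical, not the upstream ntb_hw_epf driver:

/*
 * Hypothetical host-side command issue, mirroring the protocol that
 * epf_ntb_cmd_handler() services: argument first, then command.
 */
static int ntb_host_send_command(struct epf_ntb_ctrl __iomem *ctrl,
				 u32 command, u32 argument)
{
	int retries = 100;

	writew(0, &ctrl->command_status);	/* clear stale status */
	writel(argument, &ctrl->argument);
	writel(command, &ctrl->command);	/* picked up by cmd_handler */

	while (retries--) {
		u16 status = readw(&ctrl->command_status);

		if (status)
			return status == COMMAND_STATUS_OK ? 0 : -EIO;
		usleep_range(5000, 6000);	/* endpoint polls every 5ms */
	}

	return -ETIMEDOUT;
}

For COMMAND_CONFIGURE_MW, the host would first write the target PCI address and size into ctrl->addr and ctrl->size and then pass the memory-window index as the argument, per epf_ntb_configure_mw() above.
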
+
+/**
+ * epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR1 of EP CONTROLLER 2, which contains HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ *+-----------------+------->+------------------+ +-----------------+
+ *| BAR0 | | CONFIG REGION | | BAR0 |
+ *+-----------------+----+ +------------------+<-------+-----------------+
+ *| BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ *+-----------------+ +-->+------------------+<-------+-----------------+
+ *| BAR2 | Local Memory | BAR2 |
+ *+-----------------+ +-----------------+
+ *| BAR3 | | BAR3 |
+ *+-----------------+ +-----------------+
+ *| BAR4 | | BAR4 |
+ *+-----------------+ +-----------------+
+ *| BAR5 | | BAR5 |
+ *+-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Set BAR1 of EP CONTROLLER 2, which contains HOST2's peer scratchpad
+ * region. While BAR1 is the default peer scratchpad BAR, an NTB could have
+ * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs).
+ * This function can get the exact BAR used for peer scratchpad from
+ * epf_ntb_bar[BAR_PEER_SPAD].
+ *
+ * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function
+ * gets the address of peer scratchpad from
+ * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG].
+ */
+static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *peer_epf_bar, *epf_bar;
+ enum pci_barno peer_barno, barno;
+ u32 peer_spad_offset;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ epc = ntb_epc->epc;
+
+ peer_spad_offset = peer_ntb_epc->reg->spad_offset;
+ epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset;
+ epf_bar->size = peer_ntb_epc->spad_size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ return 0;
+}
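
Because one host's peer scratchpad BAR is backed by the other host's self scratchpad memory (set up above), scratchpad exchange needs no further mapping at runtime. A host-side sketch, assuming BAR_CONFIG and BAR_PEER_SPAD have been ioremap()ed; both helpers are hypothetical:

/*
 * Hypothetical host-side scratchpad accessors. Self scratchpads sit in
 * the BAR_CONFIG region at ctrl->spad_offset; writes through the
 * BAR_PEER_SPAD region land in the peer's self scratchpad. Entries are
 * 4 bytes each (spad_size = spad_count * 4).
 */
static u32 ntb_host_spad_read(void __iomem *config_bar, u32 idx)
{
	struct epf_ntb_ctrl __iomem *ctrl = config_bar;

	return readl(config_bar + readl(&ctrl->spad_offset) + idx * 4);
}

static void ntb_host_peer_spad_write(void __iomem *peer_spad_bar, u32 idx,
				     u32 val)
{
	writel(val, peer_spad_bar + idx * 4);
}
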
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Clear BAR0 of EP CONTROLLER 1, which contains HOST1's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note that the self scratchpad region and the config region are
+ * combined into a single region and mapped using the same BAR. Also note
+ * that HOST2's peer scratchpad is HOST1's self scratchpad.
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address.
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Map BAR0 of EP CONTROLLER 1, which contains HOST1's config and
+ * self scratchpad region. While BAR0 is the default self scratchpad BAR, an
+ * NTB could have other BARs for self scratchpad (because of reserved BARs).
+ * This function can get the exact BAR used for self scratchpad from
+ * epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note that the self scratchpad region and the config region are
+ * combined into a single region and mapped using the same BAR. Also note
+ * that HOST2's peer scratchpad is HOST1's self scratchpad.
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct epf_ntb *ntb;
+ struct pci_epc *epc;
+ struct device *dev;
+ int ret;
+
+ ntb = ntb_epc->epf_ntb;
+ dev = &ntb->epf->dev;
+
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
+ pci_epc_interface_string(ntb_epc->type));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Free the Local Memory mentioned in the above diagram. After invoking this
+ * function, neither the config + self scratchpad region of HOST1 nor the peer
+ * scratchpad region of HOST2 should be accessed.
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ struct pci_epf *epf;
+
+ epf = ntb->epf;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ntb_epc = ntb->epc[type];
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ if (ntb_epc->reg)
+ pci_epf_free_space(epf, ntb_epc->reg, barno, type);
+ }
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * +-----------------+------->+------------------+ +-----------------+
+ * | BAR0 | | CONFIG REGION | | BAR0 |
+ * +-----------------+----+ +------------------+<-------+-----------------+
+ * | BAR1 | | |SCRATCHPAD REGION | | BAR1 |
+ * +-----------------+ +-->+------------------+<-------+-----------------+
+ * | BAR2 | Local Memory | BAR2 |
+ * +-----------------+ +-----------------+
+ * | BAR3 | | BAR3 |
+ * +-----------------+ +-----------------+
+ * | BAR4 | | BAR4 |
+ * +-----------------+ +-----------------+
+ * | BAR5 | | BAR5 |
+ * +-----------------+ +-----------------+
+ * EP CONTROLLER 1 EP CONTROLLER 2
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ *
+ * The sizes of both the config region and the scratchpad region have to be
+ * aligned, since the scratchpad region will also be mapped as the PEER
+ * SCRATCHPAD of the other host using a separate BAR.
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *peer_epc_features, *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ size_t msix_table_size, pba_size, align;
+ enum pci_barno peer_barno, barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size, peer_size;
+ struct pci_epf *epf;
+ struct device *dev;
+ bool msix_capable;
+ u32 spad_count;
+ void *base;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ ntb_epc = ntb->epc[type];
+
+ epc_features = ntb_epc->epc_features;
+ barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ peer_ntb_epc = ntb->epc[!type];
+ peer_epc_features = peer_ntb_epc->epc_features;
+ peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
+ peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+
+ /* Check if epc_features is populated incorrectly */
+ if (!IS_ALIGNED(size, align))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = spad_count * 4;
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * ntb->db_count;
+ ctrl_size = ALIGN(ctrl_size, 8);
+ ntb_epc->msix_table_offset = ctrl_size;
+ ntb_epc->msix_bar = barno;
+ /* Align to QWORD or 8 Bytes */
+ pba_size = ALIGN(DIV_ROUND_UP(ntb->db_count, 8), 8);
+ ctrl_size = ctrl_size + msix_table_size + pba_size;
+ }
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (peer_size) {
+ if (peer_size < spad_size)
+ spad_count = peer_size / 4;
+ spad_size = peer_size;
+ }
+
+ /*
+ * In order to make sure SPAD offset is aligned to its size,
+ * expand control region size to the size of SPAD if SPAD size
+ * is greater than control region size.
+ */
+ if (spad_size > ctrl_size)
+ ctrl_size = spad_size;
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, type);
+ if (!base) {
+ dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
+ pci_epc_interface_string(type));
+ return -ENOMEM;
+ }
+
+ ntb_epc->reg = base;
+
+ ctrl = ntb_epc->reg;
+ ctrl->spad_offset = ctrl_size;
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ctrl->db_entry_size = align ? align : 4;
+ ntb_epc->spad_size = spad_size;
+
+ return 0;
+}
+
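+/*
+ * Editor's worked example (illustrative, not part of the driver; assumes
+ * sizeof(struct epf_ntb_ctrl) <= 4 KiB, no MSI-X, no fixed BAR size and a
+ * controller reporting align = 4096): for spad_count = 64,
+ * epf_ntb_config_spad_bar_alloc() above computes
+ *
+ *   spad_size = 64 * 4 = 256       -> ALIGN(256, 4096) = 4096
+ *   ctrl_size = sizeof(...ctrl)    -> ALIGN(..., 4096) = 4096
+ *   ctrl->spad_offset = ctrl_size  = 4096
+ *   size = ctrl_size + spad_size   = 8192
+ *
+ * i.e. BAR_CONFIG is backed by an 8 KiB buffer, with the control region in
+ * the first 4 KiB and the scratchpads starting at offset 4 KiB.
+ */
+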
+/**
+ * epf_ntb_config_spad_bar_alloc_interface() - Allocate memory for config +
+ * scratchpad region for each of the PRIMARY and SECONDARY interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_config_spad_bar_alloc() that allocates memory for the
+ * config + scratchpad region for each interface.
+ */
+static int epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_config_spad_bar_alloc(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_free_peer_mem() - Free memory allocated in the peer's outbound
+ * address space
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address regions
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Free memory allocated in EP CONTROLLER 2 (OB SPACE) in the above diagram.
+ * It'll free Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3,
+ * MW4).
+ */
+static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ phys_addr_t phys_addr;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ struct pci_epc *epc;
+ size_t size;
+
+ epc = ntb_epc->epc;
+
+ for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ mw_addr = ntb_epc->mw_addr[barno];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ phys_addr = epf_bar->phys_addr;
+ size = epf_bar->size;
+ if (mw_addr) {
+ pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size);
+ ntb_epc->mw_addr[barno] = NULL;
+ }
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_clear() - Clear doorbell and memory window BARs
+ * @ntb_epc: EPC associated with one of the HOSTs, which holds the peer's
+ * outbound address
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Clear doorbell and memory window BARs (remove inbound ATU configuration).
+ * In the above diagram this clears BAR2 to BAR5 of EP CONTROLLER 1 (Doorbell
+ * BAR, MW1 BAR, MW2 BAR, MW3 BAR and MW4 BAR).
+ */
+static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
+{
+ struct pci_epf_bar *epf_bar;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+
+ epc = ntb_epc->epc;
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
+ }
+}
+
+/**
+ * epf_ntb_db_mw_bar_cleanup() - Clear doorbell/memory window BARs and free
+ * memory allocated in the peer's outbound address space
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_db_mw_bar_clear(), which clears HOST1's BARs, and
+ * epf_ntb_free_peer_mem(), which frees up HOST2's outbound memory.
+ */
+static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ epf_ntb_db_mw_bar_clear(ntb_epc);
+ epf_ntb_free_peer_mem(peer_ntb_epc);
+}
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Configure MSI/MSI-X capability for each interface with number of
+ * interrupts equal to "db_count" configfs entry.
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ bool msix_capable, msi_capable;
+ struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ dev = &ntb->epf->dev;
+
+ epc_features = ntb_epc->epc_features;
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+
+ if (!(msix_capable || msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+ epc = ntb_epc->epc;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
+ if (ret) {
+ dev_err(dev, "%s intf: MSI configuration failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
+ ntb_epc->msix_bar,
+ ntb_epc->msix_table_offset);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space
+ * @dev: The PCI device.
+ * @ntb_epc: EPC associated with one of the HOSTs whose BAR holds the peer's
+ * outbound address
+ * @bar: BAR of @ntb_epc for which memory has to be allocated (could be
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3, BAR_MW4)
+ * @peer_ntb_epc: EPC associated with the HOST whose outbound address space is
+ * used by @ntb_epc
+ * @size: Size of the address region that has to be allocated in the peer's
+ * OB SPACE
+ *
+ * +-----------------+ +---->+----------------+-----------+-----------------+
+ * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+ | +----------------+ +-----------------+
+ * | BAR1 | | | Doorbell 2 +---------+ | |
+ * +-----------------+----+ +----------------+ | | |
+ * | BAR2 | | Doorbell 3 +-------+ | +-----------------+
+ * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 |
+ * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+
+ * +-----------------+ | |----------------+ | | | |
+ * | BAR4 | | | | | | +-----------------+
+ * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3||
+ * | BAR5 | | | | | | +-----------------+
+ * +-----------------+ +---->-----------------+ | | | |
+ * EP CONTROLLER 1 | | | | +-----------------+
+ * | | | +---->+ MSI|X ADDRESS 4 |
+ * +----------------+ | +-----------------+
+ * (A) EP CONTROLLER 2 | | |
+ * (OB SPACE) | | |
+ * +-------> MW1 |
+ * | |
+ * | |
+ * (B) +-----------------+
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +-----------------+
+ * PCI Address Space
+ * (Managed by HOST2)
+ *
+ * Allocate memory in OB space of EP CONTROLLER 2 in the above diagram. Allocate
+ * for Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3, MW4).
+ */
+static int epf_ntb_alloc_peer_mem(struct device *dev,
+ struct epf_ntb_epc *ntb_epc,
+ enum epf_ntb_bar bar,
+ struct epf_ntb_epc *peer_ntb_epc,
+ size_t size)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *peer_epc;
+ phys_addr_t phys_addr;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t align;
+
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ peer_epc = peer_ntb_epc->epc;
+ mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size);
+ if (!mw_addr) {
+ dev_err(dev, "%s intf: Failed to allocate OB address\n",
+ pci_epc_interface_string(peer_ntb_epc->type));
+ return -ENOMEM;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+ ntb_epc->mw_addr[barno] = mw_addr;
+
+ epf_bar->phys_addr = phys_addr;
+ epf_bar->size = size;
+ epf_bar->barno = barno;
+ epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ return 0;
+}
+
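+/*
+ * Editor's note (illustrative): the sizing rules above mean a request of,
+ * say, 100 bytes is first bumped to the 128-byte minimum and then either
+ * ALIGN()ed up when the controller reports an alignment constraint or, with
+ * no constraint, rounded up to a power of two:
+ *
+ *   size = 100;                        // requested
+ *   size = 128;                        // minimum enforced above
+ *   size = align ? ALIGN(size, align)  // 4096 for align == 4096
+ *                : roundup_pow_of_two(size); // 128 otherwise
+ */
+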
+/**
+ * epf_ntb_db_mw_bar_init() - Configure Doorbell and Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper for epf_ntb_alloc_peer_mem() and pci_epc_set_bar() that allocates
+ * memory in the OB address space of HOST2 and configures the BARs of HOST1.
+ */
+static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_ctrl *ctrl;
+ u32 num_mws, db_count;
+ enum epf_ntb_bar bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct device *dev;
+ size_t align;
+ int ret, i;
+ u64 size;
+
+ ntb_epc = ntb->epc[type];
+ peer_ntb_epc = ntb->epc[!type];
+
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+ align = epc_features->align;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+ epc = ntb_epc->epc;
+ num_mws = ntb->num_mws;
+ db_count = ntb->db_count;
+
+ for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) {
+ if (bar == BAR_DB_MW1) {
+ align = align ? align : 4;
+ size = db_count * align;
+ size = ALIGN(size, ntb->mws_size[i]);
+ ctrl = ntb_epc->reg;
+ ctrl->mw1_offset = size;
+ size += ntb->mws_size[i];
+ } else {
+ size = ntb->mws_size[i];
+ }
+
+ ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar,
+ peer_ntb_epc, size);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell mem alloc failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+
+ barno = ntb_epc->epf_ntb_bar[bar];
+ epf_bar = &ntb_epc->epf_bar[barno];
+
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "%s intf: DoorBell BAR set failed\n",
+ pci_epc_interface_string(type));
+ goto err_alloc_peer_mem;
+ }
+ }
+
+ return 0;
+
+err_alloc_peer_mem:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Unbind the NTB function device from the EPC and relinquish the reference
+ * to pci_epc for each interface.
+ */
+static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ if (type < 0)
+ return;
+
+ epf = ntb->epf;
+ ntb_epc = ntb->epc[type];
+ if (!ntb_epc)
+ return;
+ epc = ntb_epc->epc;
+ pci_epc_remove_epf(epc, epf, type);
+ pci_epc_put(epc);
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup all NTB EPC interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_epc_destroy_interface() to clean up all the NTB
+ * interfaces.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_destroy_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @epc: struct pci_epc to which a particular NTB interface should be associated
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Allocate memory for NTB EPC interface and initialize it.
+ */
+static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
+ struct pci_epc *epc,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct pci_epf_bar *epf_bar;
+ struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
+ struct pci_epf *epf;
+ struct device *dev;
+
+ dev = &ntb->epf->dev;
+
+ ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL);
+ if (!ntb_epc)
+ return -ENOMEM;
+
+ epf = ntb->epf;
+ vfunc_no = epf->vfunc_no;
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ epf_bar = epf->bar;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ ntb_epc->linkup = false;
+ ntb_epc->epc = epc;
+ ntb_epc->func_no = func_no;
+ ntb_epc->vfunc_no = vfunc_no;
+ ntb_epc->type = type;
+ ntb_epc->epf_bar = epf_bar;
+ ntb_epc->epf_ntb = ntb;
+
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+ ntb_epc->epc_features = epc_features;
+
+ ntb->epc[type] = ntb_epc;
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_create() - Create and initialize NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Get a reference to the EPC device and bind the NTB function device to that
+ * EPC for each of the interfaces. It is also a wrapper for
+ * epf_ntb_epc_create_interface(), which allocates memory for an NTB EPC
+ * interface and initializes it.
+ */
+static int epf_ntb_epc_create(struct epf_ntb *ntb)
+{
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->epc, PRIMARY_INTERFACE);
+ if (ret) {
+ dev_err(dev, "PRIMARY intf: Fail to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc,
+ SECONDARY_INTERFACE);
+ if (ret) {
+ dev_err(dev, "SECONDARY intf: Fail to create NTB EPC\n");
+ goto err_epc_create;
+ }
+
+ return 0;
+
+err_epc_create:
+ epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_init_epc_bar_interface() - Identify BARs to be used for each of
+ * the NTB constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Identify the free BARs to be used for each of BAR_CONFIG, BAR_PEER_SPAD,
+ * BAR_DB_MW1, BAR_MW2, BAR_MW3 and BAR_MW4.
+ */
+static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ const struct pci_epc_features *epc_features;
+ struct epf_ntb_epc *ntb_epc;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ ntb_epc = ntb->epc[type];
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = ntb_epc->epc_features;
+
+ /* These BARs are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "%s intf: Fail to get NTB function BAR\n",
+ pci_epc_interface_string(type));
+ return barno;
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+ }
+ ntb_epc->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper for epf_ntb_init_epc_bar_interface() to identify the free BARs
+ * to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2,
+ * BAR_MW3 and BAR_MW4 for all the interfaces.
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_init_epc_bar_interface(ntb, type);
+ if (ret) {
+ dev_err(dev, "Fail to init EPC bar for %s interface\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_epc_init_interface() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * to check for commands from the host. This function writes to the
+ * EP controller HW to configure it.
+ */
+static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ ntb_epc = ntb->epc[type];
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = ntb_epc->epc;
+ func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
+ if (ret) {
+ dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ return ret;
+ }
+
+ ret = epf_ntb_peer_spad_bar_set(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Peer SPAD BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Interrupt configuration failed\n",
+ pci_epc_interface_string(type));
+ goto err_peer_spad_bar_init;
+ }
+
+ ret = epf_ntb_db_mw_bar_init(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: DB/MW BAR init failed\n",
+ pci_epc_interface_string(type));
+ goto err_db_mw_bar_init;
+ }
+
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
+ }
+
+ INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+
+err_db_mw_bar_init:
+ epf_ntb_peer_spad_bar_clear(ntb->epc[type]);
+
+err_peer_spad_bar_init:
+ epf_ntb_config_sspad_bar_clear(ntb->epc[type]);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup_interface() - Cleanup NTB interface
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * Wrapper to clean up a particular NTB interface.
+ */
+static void epf_ntb_epc_cleanup_interface(struct epf_ntb *ntb,
+ enum pci_epc_interface_type type)
+{
+ struct epf_ntb_epc *ntb_epc;
+
+ if (type < 0)
+ return;
+
+ ntb_epc = ntb->epc[type];
+ cancel_delayed_work(&ntb_epc->cmd_handler);
+ epf_ntb_db_mw_bar_cleanup(ntb, type);
+ epf_ntb_peer_spad_bar_clear(ntb_epc);
+ epf_ntb_config_sspad_bar_clear(ntb_epc);
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to clean up all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++)
+ epf_ntb_epc_cleanup_interface(ntb, type);
+}
+
+/**
+ * epf_ntb_epc_init() - Initialize all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * Wrapper to initialize all NTB interfaces and start the workqueue
+ * to check for commands from the host.
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ enum pci_epc_interface_type type;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+ ret = epf_ntb_epc_init_interface(ntb, type);
+ if (ret) {
+ dev_err(dev, "%s intf: Failed to initialize\n",
+ pci_epc_interface_string(type));
+ goto err_init_type;
+ }
+ }
+
+ return 0;
+
+err_init_type:
+ epf_ntb_epc_cleanup_interface(ntb, type - 1);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize both the endpoint controllers associated with the NTB function
+ * device. Invoked when a primary or secondary interface is bound to an EPC
+ * device. This function succeeds only when an EPC is bound to both
+ * interfaces.
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ if (!epf->sec_epc) {
+ dev_dbg(dev, "SECONDARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_epc_create(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ return ret;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc_interface(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Clean up the initialization done in epf_ntb_bind().
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sysfs_emit(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ \
+ if (kstrtou32(page, 0, &val) < 0) \
+ return -EINVAL; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
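+/*
+ * For reference (editor's illustration): the EPF_NTB_R(spad_count) invocation
+ * further down expands to roughly
+ *
+ *   static ssize_t epf_ntb_spad_count_show(struct config_item *item,
+ *                                          char *page)
+ *   {
+ *           struct config_group *group = to_config_group(item);
+ *           struct epf_ntb *ntb = to_epf_ntb(group);
+ *
+ *           return sysfs_emit(page, "%d\n", ntb->spad_count);
+ *   }
+ *
+ * with EPF_NTB_W() producing the matching store. The EPF_NTB_MW_*() variants
+ * defined next additionally sscanf() the window number out of the stringified
+ * attribute name ("mw1" ... "mw4").
+ */
+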
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int win_no; \
+ \
+ sscanf(#_name, "mw%d", &win_no); \
+ \
+ return sysfs_emit(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ \
+ if (kstrtou64(page, 0, &val) < 0) \
+ return -EINVAL; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (ntb->num_mws < win_no) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+
+ if (kstrtou32(page, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB specific properties like db_count, spad_count and num_mws.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
+
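+/*
+ * Editor's sketch of typical userspace usage of this group (paths follow
+ * Documentation/PCI/endpoint/pci-ntb-howto.rst; the "pci_epf_ntb.0"
+ * directory name comes from the function device name and may differ):
+ *
+ *   # cd /sys/kernel/config/pci_ep/
+ *   # mkdir functions/pci_epf_ntb/func1
+ *   # echo 4 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/db_count
+ *   # echo 64 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/spad_count
+ *   # echo 1 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/num_mws
+ *   # echo 0x100000 > functions/pci_epf_ntb/func1/pci_epf_ntb.0/mw1
+ */
+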
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ * @id: NTB endpoint function device ID
+ *
+ * Probe the NTB function driver when the endpoint function bus detects an
+ * NTB endpoint function.
+ */
+static int epf_ntb_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ epf_set_drvdata(epf, ntb);
+
+ return 0;
+}
+
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_ntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_ntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+ kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI, 0);
+ if (!kpcintb_workqueue) {
+ pr_err("Failed to allocate the kpcintb work queue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
new file mode 100644
index 0000000000..1f0d2b8429
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci_ids.h>
+#include <linux/random.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+
+#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_MSI 1
+#define IRQ_TYPE_MSIX 2
+
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define COMMAND_RAISE_MSIX_IRQ BIT(2)
+#define COMMAND_READ BIT(3)
+#define COMMAND_WRITE BIT(4)
+#define COMMAND_COPY BIT(5)
+
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define FLAG_USE_DMA BIT(0)
+
+#define TIMER_RESOLUTION 1
+
+static struct workqueue_struct *kpcitest_workqueue;
+
+struct pci_epf_test {
+ void *reg[PCI_STD_NUM_BARS];
+ struct pci_epf *epf;
+ enum pci_barno test_reg_bar;
+ size_t msix_table_offset;
+ struct delayed_work cmd_handler;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
+ struct dma_chan *transfer_chan;
+ dma_cookie_t transfer_cookie;
+ enum dma_status transfer_status;
+ struct completion transfer_complete;
+ bool dma_supported;
+ bool dma_private;
+ const struct pci_epc_features *epc_features;
+};
+
+struct pci_epf_test_reg {
+ u32 magic;
+ u32 command;
+ u32 status;
+ u64 src_addr;
+ u64 dst_addr;
+ u32 size;
+ u32 checksum;
+ u32 irq_type;
+ u32 irq_number;
+ u32 flags;
+} __packed;
+
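+/*
+ * Editor's sketch of the host-side protocol this register layout implies
+ * (the in-tree host driver is drivers/misc/pci_endpoint_test.c; 'base',
+ * 'buf' and 'phys' below are illustrative, offsets follow the __packed
+ * layout above): the host fills in the address, size and IRQ fields through
+ * the test BAR, writes a single COMMAND_* bit to 'command', then waits for
+ * the interrupt and checks 'status'. E.g. for a read test (the endpoint
+ * reads from host memory):
+ *
+ *   get_random_bytes(buf, size);
+ *   writel(crc32_le(~0, buf, size), base + 0x20);   // checksum
+ *   writel(lower_32_bits(phys), base + 0x0c);       // src_addr (low)
+ *   writel(upper_32_bits(phys), base + 0x10);       // src_addr (high)
+ *   writel(size, base + 0x1c);                      // size
+ *   writel(IRQ_TYPE_MSI, base + 0x24);              // irq_type
+ *   writel(1, base + 0x28);                         // irq_number
+ *   writel(COMMAND_READ, base + 0x04);              // command
+ *   // ...wait for the IRQ, then expect STATUS_READ_SUCCESS in 'status'
+ */
+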
+static struct pci_epf_header test_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+
+static void pci_epf_test_dma_callback(void *param)
+{
+ struct pci_epf_test *epf_test = param;
+ struct dma_tx_state state;
+
+ epf_test->transfer_status =
+ dmaengine_tx_status(epf_test->transfer_chan,
+ epf_test->transfer_cookie, &state);
+ if (epf_test->transfer_status == DMA_COMPLETE ||
+ epf_test->transfer_status == DMA_ERROR)
+ complete(&epf_test->transfer_complete);
+}
+
+/**
+ * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
+ * data between PCIe EP and remote PCIe RC
+ * @epf_test: the EPF test device that performs the data transfer operation
+ * @dma_dst: The destination address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @dma_src: The source address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @len: The size of the data transfer
+ * @dma_remote: remote RC physical address
+ * @dir: DMA transfer direction
+ *
+ * Function that uses dmaengine API to transfer data between PCIe EP and remote
+ * PCIe RC. The source and destination address can be a physical address given
+ * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
+ *
+ * The function returns '0' on success and a negative value on failure.
+ */
+static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len, dma_addr_t dma_remote,
+ enum dma_transfer_direction dir)
+{
+ struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
+ epf_test->dma_chan_tx : epf_test->dma_chan_rx;
+ dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct pci_epf *epf = epf_test->epf;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_slave_config sconf = {};
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (IS_ERR_OR_NULL(chan)) {
+ dev_err(dev, "Invalid DMA memcpy channel\n");
+ return -EINVAL;
+ }
+
+ if (epf_test->dma_private) {
+ sconf.direction = dir;
+ if (dir == DMA_MEM_TO_DEV)
+ sconf.dst_addr = dma_remote;
+ else
+ sconf.src_addr = dma_remote;
+
+ if (dmaengine_slave_config(chan, &sconf)) {
+ dev_err(dev, "DMA slave config fail\n");
+ return -EIO;
+ }
+ tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
+ flags);
+ } else {
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+ flags);
+ }
+
+ if (!tx) {
+ dev_err(dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&epf_test->transfer_complete);
+ epf_test->transfer_chan = chan;
+ tx->callback = pci_epf_test_dma_callback;
+ tx->callback_param = epf_test;
+ epf_test->transfer_cookie = dmaengine_submit(tx);
+
+ ret = dma_submit_error(epf_test->transfer_cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
+ goto terminate;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
+ if (ret < 0) {
+ dev_err(dev, "DMA wait_for_completion interrupted\n");
+ goto terminate;
+ }
+
+ if (epf_test->transfer_status == DMA_ERROR) {
+ dev_err(dev, "DMA transfer failed\n");
+ ret = -EIO;
+ }
+
+terminate:
+ dmaengine_terminate_sync(chan);
+
+ return ret;
+}
+
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev
+ && (filter->dma_mask & caps.directions);
+}
+
+/**
+ * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Function to initialize EPF test DMA channel.
+ */
+static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
+{
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct epf_dma_filter filter;
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+ int ret;
+
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
+ goto fail_back_tx;
+ }
+
+ epf_test->dma_chan_rx = dma_chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
+
+ if (!dma_chan) {
+ dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
+ goto fail_back_rx;
+ }
+
+ epf_test->dma_chan_tx = dma_chan;
+ epf_test->dma_private = true;
+
+ init_completion(&epf_test->transfer_complete);
+
+ return 0;
+
+fail_back_rx:
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_tx = NULL;
+
+fail_back_tx:
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get DMA channel\n");
+ return ret;
+ }
+ init_completion(&epf_test->transfer_complete);
+
+ epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
+
+ return 0;
+}
+
+/**
+ * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Helper to clean up the EPF test DMA channels.
+ */
+static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
+{
+ if (!epf_test->dma_supported)
+ return;
+
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
+
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+}
+
+static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
+ const char *op, u64 size,
+ struct timespec64 *start,
+ struct timespec64 *end, bool dma)
+{
+ struct timespec64 ts = timespec64_sub(*end, *start);
+ u64 rate = 0, ns;
+
+ /* calculate the rate */
+ ns = timespec64_to_ns(&ts);
+ if (ns)
+ rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
+
+ dev_info(&epf_test->epf->dev,
+ "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
+ op, size, dma ? "YES" : "NO",
+ (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
+}
+
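+/*
+ * Editor's note: the rate above is reported in KB/s. Worked example: a 1 MiB
+ * (1048576 B) transfer taking 2 ms (ns = 2000000) gives
+ *
+ *   rate = (1048576 * NSEC_PER_SEC) / (2000000 * 1000) = 524288 KB/s
+ *
+ * i.e. roughly 512 MB/s.
+ */
+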
+static void pci_epf_test_copy(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ int ret;
+ void __iomem *src_addr;
+ void __iomem *dst_addr;
+ phys_addr_t src_phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
+ if (!src_addr) {
+ dev_err(dev, "Failed to allocate source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
+ reg->src_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "Failed to map source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_src_addr;
+ }
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
+ if (!dst_addr) {
+ dev_err(dev, "Failed to allocate destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err_src_map_addr;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
+ reg->dst_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "Failed to map destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_dst_addr;
+ }
+
+ ktime_get_ts64(&start);
+ if (reg->flags & FLAG_USE_DMA) {
+ if (epf_test->dma_private) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ src_phys_addr, reg->size, 0,
+ DMA_MEM_TO_MEM);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ } else {
+ void *buf;
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ memcpy_fromio(buf, src_addr, reg->size);
+ memcpy_toio(dst_addr, buf, reg->size);
+ kfree(buf);
+ }
+ ktime_get_ts64(&end);
+ pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
+ reg->flags & FLAG_USE_DMA);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
+
+err_dst_addr:
+ pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+
+err_src_map_addr:
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
+
+err_src_addr:
+ pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+
+err:
+ if (!ret)
+ reg->status |= STATUS_COPY_SUCCESS;
+ else
+ reg->status |= STATUS_COPY_FAIL;
+}
+
+static void pci_epf_test_read(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ int ret;
+ void __iomem *src_addr;
+ void *buf;
+ u32 crc32;
+ phys_addr_t phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!src_addr) {
+ dev_err(dev, "Failed to allocate address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->src_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_addr;
+ }
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ if (reg->flags & FLAG_USE_DMA) {
+ dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev, "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ phys_addr, reg->size,
+ reg->src_addr, DMA_DEV_TO_MEM);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
+ DMA_FROM_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, src_addr, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
+ reg->flags & FLAG_USE_DMA);
+
+ crc32 = crc32_le(~0, buf, reg->size);
+ if (crc32 != reg->checksum)
+ ret = -EIO;
+
+err_dma_map:
+ kfree(buf);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+
+err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+
+err:
+ if (!ret)
+ reg->status |= STATUS_READ_SUCCESS;
+ else
+ reg->status |= STATUS_READ_FAIL;
+}
+
+static void pci_epf_test_write(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ int ret;
+ void __iomem *dst_addr;
+ void *buf;
+ phys_addr_t phys_addr;
+ phys_addr_t src_phys_addr;
+ struct timespec64 start, end;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!dst_addr) {
+ dev_err(dev, "Failed to allocate address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->dst_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_addr;
+ }
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ get_random_bytes(buf, reg->size);
+ reg->checksum = crc32_le(~0, buf, reg->size);
+
+ if (reg->flags & FLAG_USE_DMA) {
+ src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev, "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+
+ ret = pci_epf_test_data_transfer(epf_test, phys_addr,
+ src_phys_addr, reg->size,
+ reg->dst_addr,
+ DMA_MEM_TO_DEV);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, reg->size,
+ DMA_TO_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(dst_addr, buf, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
+ reg->flags & FLAG_USE_DMA);
+
+ /*
+ * Wait 1ms in order for the write to complete. Without this delay, an
+ * L3 error is observed in the host system.
+ */
+ usleep_range(1000, 2000);
+
+err_dma_map:
+ kfree(buf);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+
+err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
+
+err:
+ if (!ret)
+ reg->status |= STATUS_WRITE_SUCCESS;
+ else
+ reg->status |= STATUS_WRITE_FAIL;
+}
+
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ u32 status = reg->status | STATUS_IRQ_RAISED;
+ int count;
+
+ /*
+ * Set the status before raising the IRQ to ensure that the host sees
+ * the updated value when it gets the IRQ.
+ */
+ WRITE_ONCE(reg->status, status);
+
+ switch (reg->irq_type) {
+ case IRQ_TYPE_LEGACY:
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
+ break;
+ case IRQ_TYPE_MSI:
+ count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
+ if (reg->irq_number > count || count <= 0) {
+ dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
+ reg->irq_number, count);
+ return;
+ }
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, reg->irq_number);
+ break;
+ case IRQ_TYPE_MSIX:
+ count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
+ if (reg->irq_number > count || count <= 0) {
+ dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
+ reg->irq_number, count);
+ return;
+ }
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, reg->irq_number);
+ break;
+ default:
+ dev_err(dev, "Failed to raise IRQ, unknown type\n");
+ break;
+ }
+}
+
+static void pci_epf_test_cmd_handler(struct work_struct *work)
+{
+ u32 command;
+ struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
+ cmd_handler.work);
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+
+ command = READ_ONCE(reg->command);
+ if (!command)
+ goto reset_handler;
+
+ WRITE_ONCE(reg->command, 0);
+ WRITE_ONCE(reg->status, 0);
+
+ if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
+ !epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ goto reset_handler;
+ }
+
+ if (reg->irq_type > IRQ_TYPE_MSIX) {
+ dev_err(dev, "Failed to detect IRQ type\n");
+ goto reset_handler;
+ }
+
+ switch (command) {
+ case COMMAND_RAISE_LEGACY_IRQ:
+ case COMMAND_RAISE_MSI_IRQ:
+ case COMMAND_RAISE_MSIX_IRQ:
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ case COMMAND_WRITE:
+ pci_epf_test_write(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ case COMMAND_READ:
+ pci_epf_test_read(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ case COMMAND_COPY:
+ pci_epf_test_copy(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ default:
+ dev_err(dev, "Invalid command 0x%x\n", command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_bar *epf_bar;
+ int bar;
+
+ cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ epf_bar = &epf->bar[bar];
+
+ if (epf_test->reg[bar]) {
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
+ }
+ }
+}
+
+static int pci_epf_test_set_bar(struct pci_epf *epf)
+{
+ int bar, add;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ const struct pci_epc_features *epc_features;
+
+ epc_features = epf_test->epc_features;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
+ epf_bar = &epf->bar[bar];
+ /*
+ * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
+ * if the specific implementation required a 64-bit BAR,
+ * even if we only requested a 32-bit BAR.
+ */
+ add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+
+ if (epc_features->reserved_bar & (1 << bar))
+ continue;
+
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
+ if (ret) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
+ dev_err(dev, "Failed to set BAR%d\n", bar);
+ if (bar == test_reg_bar)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_core_init(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epf_header *header = epf->header;
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ bool msix_capable = false;
+ bool msi_capable = true;
+ int ret;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (epc_features) {
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+ }
+
+ if (epf->vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
+ }
+
+ ret = pci_epf_test_set_bar(epf);
+ if (ret)
+ return ret;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts,
+ epf_test->test_reg_bar,
+ epf_test->msix_table_offset);
+ if (ret) {
+ dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_link_up(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+
+ return 0;
+}
+
+static const struct pci_epc_event_ops pci_epf_test_event_ops = {
+ .core_init = pci_epf_test_core_init,
+ .link_up = pci_epf_test_link_up,
+};
+
+static int pci_epf_test_alloc_space(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ struct pci_epf_bar *epf_bar;
+ size_t msix_table_size = 0;
+ size_t test_reg_bar_size;
+ size_t pba_size = 0;
+ bool msix_capable;
+ void *base;
+ int bar, add;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ const struct pci_epc_features *epc_features;
+ size_t test_reg_size;
+
+ epc_features = epf_test->epc_features;
+
+ test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ epf_test->msix_table_offset = test_reg_bar_size;
+ /* Align to QWORD or 8 Bytes */
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+ }
+ test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
+
+ if (epc_features->bar_fixed_size[test_reg_bar]) {
+ if (test_reg_size > bar_size[test_reg_bar])
+ return -ENOMEM;
+ test_reg_size = bar_size[test_reg_bar];
+ }
+
+ base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
+ epc_features->align, PRIMARY_INTERFACE);
+ if (!base) {
+ dev_err(dev, "Failed to allocated register space\n");
+ return -ENOMEM;
+ }
+ epf_test->reg[test_reg_bar] = base;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
+ epf_bar = &epf->bar[bar];
+ add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+
+ if (bar == test_reg_bar)
+ continue;
+
+ if (epc_features->reserved_bar & (1 << bar))
+ continue;
+
+ base = pci_epf_alloc_space(epf, bar_size[bar], bar,
+ epc_features->align,
+ PRIMARY_INTERFACE);
+ if (!base)
+ dev_err(dev, "Failed to allocate space for BAR%d\n",
+ bar);
+ epf_test->reg[bar] = base;
+ }
+
+ return 0;
+}
+
+static void pci_epf_configure_bar(struct pci_epf *epf,
+ const struct pci_epc_features *epc_features)
+{
+ struct pci_epf_bar *epf_bar;
+ bool bar_fixed_64bit;
+ int i;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ epf_bar = &epf->bar[i];
+ bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
+ if (bar_fixed_64bit)
+ epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (epc_features->bar_fixed_size[i])
+ bar_size[i] = epc_features->bar_fixed_size[i];
+ }
+}
+
+static int pci_epf_test_bind(struct pci_epf *epf)
+{
+ int ret;
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features;
+ enum pci_barno test_reg_bar = BAR_0;
+ struct pci_epc *epc = epf->epc;
+ bool linkup_notifier = false;
+ bool core_init_notifier = false;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (!epc_features) {
+ dev_err(&epf->dev, "epc_features not implemented\n");
+ return -EOPNOTSUPP;
+ }
+
+ linkup_notifier = epc_features->linkup_notifier;
+ core_init_notifier = epc_features->core_init_notifier;
+ test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+ if (test_reg_bar < 0)
+ return -EINVAL;
+ pci_epf_configure_bar(epf, epc_features);
+
+ epf_test->test_reg_bar = test_reg_bar;
+ epf_test->epc_features = epc_features;
+
+ ret = pci_epf_test_alloc_space(epf);
+ if (ret)
+ return ret;
+
+ if (!core_init_notifier) {
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
+ return ret;
+ }
+
+ epf_test->dma_supported = true;
+
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
+
+ if (!linkup_notifier && !core_init_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+
+ return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_test_ids[] = {
+ {
+ .name = "pci_epf_test",
+ },
+ {},
+};
+
+static int pci_epf_test_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct pci_epf_test *epf_test;
+ struct device *dev = &epf->dev;
+
+ epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
+ if (!epf_test)
+ return -ENOMEM;
+
+ epf->header = &test_header;
+ epf_test->epf = epf;
+
+ INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
+
+ epf->event_ops = &pci_epf_test_event_ops;
+
+ epf_set_drvdata(epf, epf_test);
+ return 0;
+}
+
+static struct pci_epf_ops ops = {
+ .unbind = pci_epf_test_unbind,
+ .bind = pci_epf_test_bind,
+};
+
+static struct pci_epf_driver test_driver = {
+ .driver.name = "pci_epf_test",
+ .probe = pci_epf_test_probe,
+ .id_table = pci_epf_test_ids,
+ .ops = &ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_test_init(void)
+{
+ int ret;
+
+ kpcitest_workqueue = alloc_workqueue("kpcitest",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!kpcitest_workqueue) {
+ pr_err("Failed to allocate the kpcitest work queue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_epf_register_driver(&test_driver);
+ if (ret) {
+ destroy_workqueue(kpcitest_workqueue);
+ pr_err("Failed to register pci epf test driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_test_init);
+
+static void __exit pci_epf_test_exit(void)
+{
+ if (kpcitest_workqueue)
+ destroy_workqueue(kpcitest_workqueue);
+ pci_epf_unregister_driver(&test_driver);
+}
+module_exit(pci_epf_test_exit);
+
+MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
new file mode 100644
index 0000000000..3f60128560
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -0,0 +1,1468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Endpoint Function Driver to implement Non-Transparent Bridge functionality
+ * between PCI RC and EP
+ *
+ * Copyright (C) 2020 Texas Instruments
+ * Copyright (C) 2022 NXP
+ *
+ * Based on pci-epf-ntb.c
+ * Author: Frank Li <Frank.Li@nxp.com>
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+/*
+ * +------------+ +---------------------------------------+
+ * | | | |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | NetDev | | | NetDev |
+ * +------------+ | +--------------+
+ * | NTB | | | NTB |
+ * | Transfer | | | Transfer |
+ * +------------+ | +--------------+
+ * | | | | |
+ * | PCI NTB | | | |
+ * | EPF | | | |
+ * | Driver | | | PCI Virtual |
+ * | | +---------------+ | NTB Driver |
+ * | | | PCI EP NTB |<------>| |
+ * | | | FN Driver | | |
+ * +------------+ +---------------+ +--------------+
+ * | | | | | |
+ * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI |
+ * | | PCI | | | Bus |
+ * +------------+ +---------------+--------+--------------+
+ * PCIe Root Port PCI EP
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/ntb.h>
+
+static struct workqueue_struct *kpcintb_workqueue;
+
+#define COMMAND_CONFIGURE_DOORBELL 1
+#define COMMAND_TEARDOWN_DOORBELL 2
+#define COMMAND_CONFIGURE_MW 3
+#define COMMAND_TEARDOWN_MW 4
+#define COMMAND_LINK_UP 5
+#define COMMAND_LINK_DOWN 6
+
+#define COMMAND_STATUS_OK 1
+#define COMMAND_STATUS_ERROR 2
+
+#define LINK_STATUS_UP BIT(0)
+
+#define SPAD_COUNT 64
+#define DB_COUNT 4
+#define NTB_MW_OFFSET 2
+#define DB_COUNT_MASK GENMASK(15, 0)
+#define MSIX_ENABLE BIT(16)
+#define MAX_DB_COUNT 32
+#define MAX_MW 4
+
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_DB,
+ BAR_MW0,
+ BAR_MW1,
+ BAR_MW2,
+};
+
+/*
+ * +--------------------------------------------------+ Base
+ * | |
+ * | |
+ * | |
+ * | Common Control Register |
+ * | |
+ * | |
+ * | |
+ * +-----------------------+--------------------------+ Base+spad_offset
+ * | | |
+ * | Peer Spad Space | Spad Space |
+ * | | |
+ * | | |
+ * +-----------------------+--------------------------+ Base+spad_offset
+ * | | | +spad_count * 4
+ * | | |
+ * | Spad Space | Peer Spad Space |
+ * | | |
+ * +-----------------------+--------------------------+
+ * Virtual PCI PCIe Endpoint
+ * NTB Driver NTB Driver
+ */
+struct epf_ntb_ctrl {
+ u32 command;
+ u32 argument;
+ u16 command_status;
+ u16 link_status;
+ u32 topology;
+ u64 addr;
+ u64 size;
+ u32 num_mws;
+ u32 reserved;
+ u32 spad_offset;
+ u32 spad_count;
+ u32 db_entry_size;
+ u32 db_data[MAX_DB_COUNT];
+ u32 db_offset[MAX_DB_COUNT];
+} __packed;
+
+struct epf_ntb {
+ struct ntb_dev ntb;
+ struct pci_epf *epf;
+ struct config_group group;
+
+ u32 num_mws;
+ u32 db_count;
+ u32 spad_count;
+ u64 mws_size[MAX_MW];
+ u64 db;
+ u32 vbus_number;
+ u16 vntb_pid;
+ u16 vntb_vid;
+
+ bool linkup;
+ u32 spad_size;
+
+ enum pci_barno epf_ntb_bar[6];
+
+ struct epf_ntb_ctrl *reg;
+
+ u32 *epf_db;
+
+ phys_addr_t vpci_mw_phy[MAX_MW];
+ void __iomem *vpci_mw_addr[MAX_MW];
+
+ struct delayed_work cmd_handler;
+};
+
+#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group)
+#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb)
+
+static struct pci_epf_header epf_ntb_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_BASE_CLASS_MEMORY,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+/**
+ * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST)
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @link_up: true or false indicating Link is UP or Down
+ *
+ * Once the NTB function on the HOST invokes ntb_link_enable(),
+ * this NTB function driver triggers a link event to the VHOST.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+ if (link_up)
+ ntb->reg->link_status |= LINK_STATUS_UP;
+ else
+ ntb->reg->link_status &= ~LINK_STATUS_UP;
+
+ ntb_link_event(&ntb->ntb);
+ return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST
+ * to access the memory window of HOST
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * EP Outbound Window
+ * +--------+ +-----------+
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | | +-----------+
+ * | Virtual| | Memory Win|
+ * | NTB | -----------> | |
+ * | Driver | | |
+ * | | +-----------+
+ * | | | |
+ * | | | |
+ * +--------+ +-----------+
+ * VHOST PCI EP
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)
+{
+ phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
+ u64 addr, size;
+ int ret = 0;
+
+ phys_addr = ntb->vpci_mw_phy[mw];
+ addr = ntb->reg->addr;
+ size = ntb->reg->size;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size);
+ if (ret)
+ dev_err(&ntb->epf->epc->dev,
+ "Failed to map memory window %d address\n", mw);
+ return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)
+{
+ pci_epc_unmap_addr(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ ntb->vpci_mw_phy[mw]);
+}
+
+/**
+ * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST
+ * @work: work_struct for the epf_ntb_epc
+ *
+ * Workqueue function that is invoked periodically (once every 5ms) to
+ * check whether any commands have been received from the NTB HOST. The
+ * HOST can send commands to configure a doorbell, configure a memory
+ * window, or update the link status.
+ */
+static void epf_ntb_cmd_handler(struct work_struct *work)
+{
+ struct epf_ntb_ctrl *ctrl;
+ u32 command, argument;
+ struct epf_ntb *ntb;
+ struct device *dev;
+ int ret;
+ int i;
+
+ ntb = container_of(work, struct epf_ntb, cmd_handler.work);
+
+ for (i = 1; i < ntb->db_count; i++) {
+ if (ntb->epf_db[i]) {
+ ntb->db |= 1 << (i - 1);
+ ntb_db_event(&ntb->ntb, i);
+ ntb->epf_db[i] = 0;
+ }
+ }
+
+ ctrl = ntb->reg;
+ command = ctrl->command;
+ if (!command)
+ goto reset_handler;
+ argument = ctrl->argument;
+
+ ctrl->command = 0;
+ ctrl->argument = 0;
+
+ dev = &ntb->epf->dev;
+
+ switch (command) {
+ case COMMAND_CONFIGURE_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_DOORBELL:
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_CONFIGURE_MW:
+ ret = epf_ntb_configure_mw(ntb, argument);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_TEARDOWN_MW:
+ epf_ntb_teardown_mw(ntb, argument);
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ case COMMAND_LINK_UP:
+ ntb->linkup = true;
+ ret = epf_ntb_link_up(ntb, true);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ goto reset_handler;
+ case COMMAND_LINK_DOWN:
+ ntb->linkup = false;
+ ret = epf_ntb_link_up(ntb, false);
+ if (ret < 0)
+ ctrl->command_status = COMMAND_STATUS_ERROR;
+ else
+ ctrl->command_status = COMMAND_STATUS_OK;
+ break;
+ default:
+ dev_err(dev, "UNKNOWN command: %d\n", command);
+ break;
+ }
+
+reset_handler:
+ queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
+ msecs_to_jiffies(5));
+}
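+
+/*
+ * Illustrative HOST-side sequence (a sketch, not code from this driver):
+ * to configure memory window 0 the NTB host driver would do roughly
+ *
+ *   writeq(addr, &ctrl->addr);
+ *   writeq(size, &ctrl->size);
+ *   writel(0, &ctrl->argument);
+ *   writel(COMMAND_CONFIGURE_MW, &ctrl->command);
+ *
+ * and then poll ctrl->command_status for COMMAND_STATUS_OK, which
+ * epf_ntb_cmd_handler() sets on one of its 5 ms polls.
+ */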
+
+/**
+ * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Clear the BAR of the EP controller that contains the HOST's config and
+ * self scratchpad region (removes inbound ATU configuration). While BAR0 is
+ * the default self scratchpad BAR, an NTB could have other BARs for self
+ * scratchpad (because of reserved BARs). This function can get the exact BAR
+ * used for self scratchpad from epf_ntb_bar[BAR_CONFIG].
+ *
+ * Please note the self scratchpad region and config region are combined
+ * into a single region and mapped using the same BAR. Also note VHOST's peer
+ * scratchpad is HOST's self scratchpad.
+ *
+ * Returns: void
+ */
+static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+}
+
+/**
+ * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Map BAR0 of EP CONTROLLER which contains the VHOST's config and
+ * self scratchpad region.
+ *
+ * Please note the self scratchpad region and config region are combined
+ * into a single region and mapped using the same BAR.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb)
+{
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ u8 func_no, vfunc_no;
+ struct device *dev;
+ int ret;
+
+ dev = &ntb->epf->dev;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ pci_epf_free_space(ntb->epf, ntb->reg, barno, 0);
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and the size of the
+ * SCRATCHPAD REGION is derived from the "spad_count" configfs entry.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
+{
+ size_t align;
+ enum pci_barno barno;
+ struct epf_ntb_ctrl *ctrl;
+ u32 spad_size, ctrl_size;
+ u64 size;
+ struct pci_epf *epf = ntb->epf;
+ struct device *dev = &epf->dev;
+ u32 spad_count;
+ void *base;
+ int i;
+ const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc,
+ epf->func_no,
+ epf->vfunc_no);
+ barno = ntb->epf_ntb_bar[BAR_CONFIG];
+ size = epc_features->bar_fixed_size[barno];
+ align = epc_features->align;
+
+ if (!IS_ALIGNED(size, align))
+ return -EINVAL;
+
+ spad_count = ntb->spad_count;
+
+ ctrl_size = sizeof(struct epf_ntb_ctrl);
+ spad_size = 2 * spad_count * sizeof(u32);
+
+ if (!align) {
+ ctrl_size = roundup_pow_of_two(ctrl_size);
+ spad_size = roundup_pow_of_two(spad_size);
+ } else {
+ ctrl_size = ALIGN(ctrl_size, align);
+ spad_size = ALIGN(spad_size, align);
+ }
+
+ if (!size)
+ size = ctrl_size + spad_size;
+ else if (size < ctrl_size + spad_size)
+ return -EINVAL;
+
+ base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ if (!base) {
+ dev_err(dev, "Config/Status/SPAD alloc region fail\n");
+ return -ENOMEM;
+ }
+
+ ntb->reg = base;
+
+ ctrl = ntb->reg;
+ ctrl->spad_offset = ctrl_size;
+
+ ctrl->spad_count = spad_count;
+ ctrl->num_mws = ntb->num_mws;
+ ntb->spad_size = spad_size;
+
+ ctrl->db_entry_size = sizeof(u32);
+
+ for (i = 0; i < ntb->db_count; i++) {
+ ntb->reg->db_data[i] = 1 + i;
+ ntb->reg->db_offset[i] = 0;
+ }
+
+ return 0;
+}
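+
+/*
+ * For illustration (a sketch): with the default SPAD_COUNT of 64, no
+ * controller alignment constraint and no fixed BAR size, the sizing
+ * above works out to
+ *
+ *   ctrl_size = roundup_pow_of_two(sizeof(struct epf_ntb_ctrl)) = 512
+ *   spad_size = roundup_pow_of_two(2 * 64 * sizeof(u32))        = 512
+ *   size      = ctrl_size + spad_size                           = 1024
+ *
+ * so the control region occupies the first 512 bytes of the BAR and the
+ * scratchpads the next 512.
+ */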
+
+/**
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Configure the MSI/MSI-X capability for the interface with the number of
+ * interrupts equal to the "db_count" configfs entry.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ struct device *dev;
+ u32 db_count;
+ int ret;
+
+ dev = &ntb->epf->dev;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ if (!(epc_features->msix_capable || epc_features->msi_capable)) {
+ dev_err(dev, "MSI or MSI-X is required for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_count = ntb->db_count;
+ if (db_count > MAX_DB_COUNT) {
+ dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT);
+ return -EINVAL;
+ }
+
+ ntb->db_count = db_count;
+
+ if (epc_features->msi_capable) {
+ ret = pci_epc_set_msi(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ 16);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * epf_ntb_db_bar_init() - Configure Doorbell window BARs
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ u32 align;
+ struct device *dev = &ntb->epf->dev;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ void __iomem *mw_addr;
+ enum pci_barno barno;
+ size_t size = sizeof(u32) * ntb->db_count;
+
+ epc_features = pci_epc_get_features(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no);
+ align = epc_features->align;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
+ if (!mw_addr) {
+ dev_err(dev, "Failed to allocate OB address\n");
+ return -ENOMEM;
+ }
+
+ ntb->epf_db = mw_addr;
+
+ epf_bar = &ntb->epf->bar[barno];
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Doorbell BAR set failed\n");
+ goto err_alloc_peer_mem;
+ }
+ return 0;
+
+err_alloc_peer_mem:
+ pci_epf_free_space(ntb->epf, mw_addr, barno, 0);
+ return ret;
+}
+
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws);
+
+/**
+ * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory
+ * allocated in peer's outbound address space
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ */
+static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
+{
+ enum pci_barno barno;
+
+ barno = ntb->epf_ntb_bar[BAR_DB];
+ pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+}
+
+/**
+ * epf_ntb_mw_bar_init() - Configure Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
+{
+ int ret = 0;
+ int i;
+ u64 size;
+ enum pci_barno barno;
+ struct device *dev = &ntb->epf->dev;
+
+ for (i = 0; i < ntb->num_mws; i++) {
+ size = ntb->mws_size[i];
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+
+ ntb->epf->bar[barno].barno = barno;
+ ntb->epf->bar[barno].size = size;
+ ntb->epf->bar[barno].addr = NULL;
+ ntb->epf->bar[barno].phys_addr = 0;
+ ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ ret = pci_epc_set_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+ if (ret) {
+ dev_err(dev, "MW set failed\n");
+ goto err_alloc_mem;
+ }
+
+ /* Allocate EPC outbound memory windows to vpci vntb device */
+ ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc,
+ &ntb->vpci_mw_phy[i],
+ size);
+ if (!ntb->vpci_mw_addr[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "Failed to allocate source address\n");
+ goto err_set_bar;
+ }
+ }
+
+ return ret;
+
+err_set_bar:
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+err_alloc_mem:
+ epf_ntb_mw_bar_clear(ntb, i);
+ return ret;
+}
+
+/**
+ * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @num_mws: the number of memory window BARs to be cleared
+ */
+static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+{
+ enum pci_barno barno;
+ int i;
+
+ for (i = 0; i < num_mws; i++) {
+ barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ pci_epc_clear_bar(ntb->epf->epc,
+ ntb->epf->func_no,
+ ntb->epf->vfunc_no,
+ &ntb->epf->bar[barno]);
+
+ pci_epc_mem_free_addr(ntb->epf->epc,
+ ntb->vpci_mw_phy[i],
+ ntb->vpci_mw_addr[i],
+ ntb->mws_size[i]);
+ }
+}
+
+/**
+ * epf_ntb_epc_destroy() - Cleanup NTB EPC interface
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Unregister the NTB function from the EPC and release the EPC reference.
+ */
+static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
+{
+ pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0);
+ pci_epc_put(ntb->epf->epc);
+}
+
+/**
+ * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
+ * constructs (scratchpad region, doorbell, memory window)
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
+{
+ const struct pci_epc_features *epc_features;
+ enum pci_barno barno;
+ enum epf_ntb_bar bar;
+ struct device *dev;
+ u32 num_mws;
+ int i;
+
+ barno = BAR_0;
+ num_mws = ntb->num_mws;
+ dev = &ntb->epf->dev;
+ epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
+
+ /* These are required BARs which are mandatory for NTB functionality */
+ for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ dev_err(dev, "Fail to get NTB function BAR\n");
+ return barno;
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ /* These are optional BARs which don't impact NTB functionality */
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0) {
+ ntb->num_mws = i;
+ dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
+ }
+ ntb->epf_ntb_bar[bar] = barno;
+ }
+
+ return 0;
+}
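+
+/*
+ * For illustration (a sketch; actual assignments depend on the EPC): on
+ * a controller with no reserved BARs and only 32-bit BARs, the two loops
+ * above assign consecutive BARs:
+ *
+ *   epf_ntb_bar[BAR_CONFIG] = BAR_0
+ *   epf_ntb_bar[BAR_DB]     = BAR_1
+ *   epf_ntb_bar[BAR_MW0]    = BAR_2
+ *   epf_ntb_bar[BAR_MW1]    = BAR_3  (and so on for further MWs)
+ *
+ * A reserved or 64-bit BAR makes pci_epc_get_next_free_bar() skip ahead.
+ */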
+
+/**
+ * epf_ntb_epc_init() - Initialize NTB interface
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Wrapper to initialize a particular EPC interface and start the workqueue
+ * to check for commands from HOST. This function will write to the
+ * EP controller HW for configuring it.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_epc_init(struct epf_ntb *ntb)
+{
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct device *dev;
+ int ret;
+
+ epf = ntb->epf;
+ dev = &epf->dev;
+ epc = epf->epc;
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = epf_ntb_config_sspad_bar_set(ntb);
+ if (ret) {
+ dev_err(dev, "Config/self SPAD BAR init failed");
+ return ret;
+ }
+
+ ret = epf_ntb_configure_interrupt(ntb);
+ if (ret) {
+ dev_err(dev, "Interrupt configuration failed\n");
+ goto err_config_interrupt;
+ }
+
+ ret = epf_ntb_db_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "DB BAR init failed\n");
+ goto err_db_bar_init;
+ }
+
+ ret = epf_ntb_mw_bar_init(ntb);
+ if (ret) {
+ dev_err(dev, "MW BAR init failed\n");
+ goto err_mw_bar_init;
+ }
+
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ goto err_write_header;
+ }
+ }
+
+ INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler);
+ queue_work(kpcintb_workqueue, &ntb->cmd_handler.work);
+
+ return 0;
+
+err_write_header:
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+err_mw_bar_init:
+ epf_ntb_db_bar_clear(ntb);
+err_db_bar_init:
+err_config_interrupt:
+ epf_ntb_config_sspad_bar_clear(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ *
+ * Wrapper to cleanup all NTB interfaces.
+ */
+static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+{
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+}
+
+#define EPF_NTB_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->_name); \
+}
+
+#define EPF_NTB_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ ntb->_name = val; \
+ \
+ return len; \
+}
+
+#define EPF_NTB_MW_R(_name) \
+static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+}
+
+#define EPF_NTB_MW_W(_name) \
+static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ struct device *dev = &ntb->epf->dev; \
+ int win_no; \
+ u64 val; \
+ int ret; \
+ \
+ ret = kstrtou64(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (sscanf(#_name, "mw%d", &win_no) != 1) \
+ return -EINVAL; \
+ \
+ if (win_no <= 0 || win_no > ntb->num_mws) { \
+ dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \
+ return -EINVAL; \
+ } \
+ \
+ ntb->mws_size[win_no - 1] = val; \
+ \
+ return len; \
+}
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_R(vbus_number)
+EPF_NTB_W(vbus_number)
+EPF_NTB_R(vntb_pid)
+EPF_NTB_W(vntb_pid)
+EPF_NTB_R(vntb_vid)
+EPF_NTB_W(vntb_vid)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+CONFIGFS_ATTR(epf_ntb_, vbus_number);
+CONFIGFS_ATTR(epf_ntb_, vntb_pid);
+CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ &epf_ntb_attr_vbus_number,
+ &epf_ntb_attr_vntb_pid,
+ &epf_ntb_attr_vntb_vid,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB-specific properties like db_count, spad_count, num_mws, etc.
+ *
+ * Returns: Pointer to config_group
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
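+
+/*
+ * Example configfs usage from user space (a sketch; "func1" is an
+ * arbitrary directory name and the attribute directory created by
+ * epf_ntb_add_cfs() is named after the EPF device, e.g. pci_epf_vntb.0):
+ *
+ *   mkdir /sys/kernel/config/pci_ep/functions/pci_epf_vntb/func1
+ *   echo 64       > func1/pci_epf_vntb.0/spad_count
+ *   echo 4        > func1/pci_epf_vntb.0/db_count
+ *   echo 1        > func1/pci_epf_vntb.0/num_mws
+ *   echo 0x100000 > func1/pci_epf_vntb.0/mw1
+ */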
+
+/*==== Virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/
+
+static u32 pci_space[] = {
+ 0xffffffff, /* Device ID, Vendor ID */
+ 0, /* Status, Command */
+ 0xffffffff, /* Base Class, Subclass, Prog Intf, Revision ID */
+ 0x40, /* BIST, Header Type, Latency Timer, Cache Line Size */
+ 0, /* BAR 0 */
+ 0, /* BAR 1 */
+ 0, /* BAR 2 */
+ 0, /* BAR 3 */
+ 0, /* BAR 4 */
+ 0, /* BAR 5 */
+ 0, /* Cardbus CIS Pointer */
+ 0, /* Subsystem ID, Subsystem Vendor ID */
+ 0, /* ROM Base Address */
+ 0, /* Reserved, Capabilities Pointer */
+ 0, /* Reserved */
+ 0, /* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
+};
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ if (devfn == 0) {
+ memcpy(val, ((u8 *)pci_space) + where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return 0;
+}
+
+static struct pci_ops vpci_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
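+
+/*
+ * Note on the emulation above: configuration reads of devfn 0 are served
+ * straight from pci_space[], and all configuration writes are silently
+ * discarded, so the virtual NTB device's config header is effectively
+ * read-only apart from the Device/Vendor ID patched in at bind time.
+ */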
+
+static int vpci_scan_bus(void *sysdata)
+{
+ struct pci_bus *vpci_bus;
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+ if (!vpci_bus) {
+ pr_err("failed to create virtual PCI bus\n");
+ return -EINVAL;
+ }
+
+ pci_bus_add_devices(vpci_bus);
+
+ return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct epf_ntb *ndev = ntb_ndev(ntb);
+
+ return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ int ret;
+ struct device *dev;
+
+ dev = &ntb->ntb.dev;
+ barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ epf_bar = &ntb->epf->bar[barno];
+ epf_bar->phys_addr = addr;
+ epf_bar->barno = barno;
+ epf_bar->size = size;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+ if (ret) {
+ dev_err(dev, "failure set mw trans\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (base)
+ *base = ntb->vpci_mw_phy[idx];
+
+ if (size)
+ *size = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * sizeof(u32);
+ u32 val;
+ void __iomem *base = (void __iomem *)ntb->reg;
+
+ val = readl(base + off + ct + idx * sizeof(u32));
+ return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset, ct = ctrl->spad_count * sizeof(u32);
+ void __iomem *base = (void __iomem *)ntb->reg;
+
+ writel(val, base + off + ct + idx * sizeof(u32));
+ return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = (void __iomem *)ntb->reg;
+ u32 val;
+
+ val = readl(base + off + idx * sizeof(u32));
+ return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = (void __iomem *)ntb->reg;
+
+ writel(val, base + off + idx * sizeof(u32));
+ return 0;
+}
+
+static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
+{
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ u8 func_no, vfunc_no;
+ int ret;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_raise_irq(ntb->epf->epc,
+ func_no,
+ vfunc_no,
+ PCI_EPC_IRQ_MSI,
+ interrupt_num + 1);
+ if (ret)
+ dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
+
+ return ret;
+}
+
+static u64 vntb_epf_db_read(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->db;
+}
+
+static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->reg->link_status;
+}
+
+static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ ntb->db &= ~db_bits;
+ return 0;
+}
+
+static int vntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ return 0;
+}
+
+static const struct ntb_dev_ops vntb_epf_ops = {
+ .mw_count = vntb_epf_mw_count,
+ .spad_count = vntb_epf_spad_count,
+ .peer_mw_count = vntb_epf_peer_mw_count,
+ .db_valid_mask = vntb_epf_db_valid_mask,
+ .db_set_mask = vntb_epf_db_set_mask,
+ .mw_set_trans = vntb_epf_mw_set_trans,
+ .mw_clear_trans = vntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
+ .link_enable = vntb_epf_link_enable,
+ .spad_read = vntb_epf_spad_read,
+ .spad_write = vntb_epf_spad_write,
+ .peer_spad_read = vntb_epf_peer_spad_read,
+ .peer_spad_write = vntb_epf_peer_spad_write,
+ .peer_db_set = vntb_epf_peer_db_set,
+ .db_read = vntb_epf_db_read,
+ .mw_get_align = vntb_epf_mw_get_align,
+ .link_is_up = vntb_epf_link_is_up,
+ .db_clear_mask = vntb_epf_db_clear_mask,
+ .db_clear = vntb_epf_db_clear,
+ .link_disable = vntb_epf_link_disable,
+};
+
+static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
+ struct device *dev = &pdev->dev;
+
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &vntb_epf_ops;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ goto err_register_dev;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+
+err_register_dev:
+ put_device(&ndev->ntb.dev);
+ return -EINVAL;
+}
+
+static struct pci_device_id pci_vntb_table[] = {
+ {
+ PCI_DEVICE(0xffff, 0xffff),
+ },
+ {},
+};
+
+static struct pci_driver vntb_pci_driver = {
+ .name = "pci-vntb",
+ .id_table = pci_vntb_table,
+ .probe = pci_vntb_probe,
+};
+
+/* ============ PCIe EPF Driver Bind ====================*/
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Initialize the endpoint controller associated with the NTB function
+ * device and register the virtual PCI bus and driver that expose the NTB
+ * device to the VHOST. Invoked when the function device is bound to an
+ * EPC device.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to create NTB EPC\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
+ pci_vntb_table[0].vendor = ntb->vntb_vid;
+ pci_vntb_table[0].device = ntb->vntb_pid;
+
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+ dev_err(dev, "failure register vntb pci driver\n");
+ goto err_bar_alloc;
+ }
+
+ vpci_scan_bus(ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+
+ pci_unregister_driver(&vntb_pci_driver);
+}
+
+// EPF driver probe
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ * @id: NTB endpoint function device ID
+ *
+ * Probe NTB function driver when endpoint function bus detects a NTB
+ * endpoint function.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ ntb->vbus_number = 0xff;
+ epf_set_drvdata(epf, ntb);
+
+ dev_info(dev, "pci-ep epf driver loaded\n");
+ return 0;
+}
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_vntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_vntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+ kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI, 0);
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
new file mode 100644
index 0000000000..0ea64e24ed
--- /dev/null
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * configfs to configure the PCI endpoint
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static DEFINE_IDR(functions_idr);
+static DEFINE_MUTEX(functions_mutex);
+static struct config_group *functions_group;
+static struct config_group *controllers_group;
+
+struct pci_epf_group {
+ struct config_group group;
+ struct config_group primary_epc_group;
+ struct config_group secondary_epc_group;
+ struct config_group *type_group;
+ struct delayed_work cfs_work;
+ struct pci_epf *epf;
+ int index;
+};
+
+struct pci_epc_group {
+ struct config_group group;
+ struct pci_epc *epc;
+ bool start;
+};
+
+static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct pci_epf_group, group);
+}
+
+static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct pci_epc_group, group);
+}
+
+static int pci_secondary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, SECONDARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_secondary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, SECONDARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_secondary_epc_item_ops = {
+ .allow_link = pci_secondary_epc_epf_link,
+ .drop_link = pci_secondary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_secondary_epc_type = {
+ .ct_item_ops = &pci_secondary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_secondary_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *secondary_epc_group;
+
+ secondary_epc_group = &epf_group->secondary_epc_group;
+ config_group_init_type_name(secondary_epc_group, "secondary",
+ &pci_secondary_epc_type);
+ configfs_register_group(&epf_group->group, secondary_epc_group);
+
+ return secondary_epc_group;
+}
+
+static int pci_primary_epc_epf_link(struct config_item *epf_item,
+ struct config_item *epc_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_primary_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item->ci_parent);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_primary_epc_item_ops = {
+ .allow_link = pci_primary_epc_epf_link,
+ .drop_link = pci_primary_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_primary_epc_type = {
+ .ct_item_ops = &pci_primary_epc_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group
+*pci_ep_cfs_add_primary_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *primary_epc_group = &epf_group->primary_epc_group;
+
+ config_group_init_type_name(primary_epc_group, "primary",
+ &pci_primary_epc_type);
+ configfs_register_group(&epf_group->group, primary_epc_group);
+
+ return primary_epc_group;
+}
+
+static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ int ret;
+ bool start;
+ struct pci_epc *epc;
+ struct pci_epc_group *epc_group = to_pci_epc_group(item);
+
+ epc = epc_group->epc;
+
+ if (kstrtobool(page, &start) < 0)
+ return -EINVAL;
+
+ if (start == epc_group->start)
+ return -EALREADY;
+
+ if (!start) {
+ pci_epc_stop(epc);
+ epc_group->start = 0;
+ return len;
+ }
+
+ ret = pci_epc_start(epc);
+ if (ret) {
+ dev_err(&epc->dev, "failed to start endpoint controller\n");
+ return -EINVAL;
+ }
+
+ epc_group->start = start;
+
+ return len;
+}
+
+static ssize_t pci_epc_start_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%d\n", to_pci_epc_group(item)->start);
+}
+
+CONFIGFS_ATTR(pci_epc_, start);
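+
+/*
+ * Example (user space, a sketch): once a function has been linked to
+ * this controller, the endpoint controller is started with
+ *
+ *   echo 1 > /sys/kernel/config/pci_ep/controllers/<epc>/start
+ *
+ * Writing 0 stops it again via pci_epc_stop(); writing the current state
+ * returns -EALREADY.
+ */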
+
+static struct configfs_attribute *pci_epc_attrs[] = {
+ &pci_epc_attr_start,
+ NULL,
+};
+
+static int pci_epc_epf_link(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ int ret;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+ struct pci_epc *epc = epc_group->epc;
+ struct pci_epf *epf = epf_group->epf;
+
+ ret = pci_epc_add_epf(epc, epf, PRIMARY_INTERFACE);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_bind(epf);
+ if (ret) {
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pci_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf, PRIMARY_INTERFACE);
+}
+
+static struct configfs_item_operations pci_epc_item_ops = {
+ .allow_link = pci_epc_epf_link,
+ .drop_link = pci_epc_epf_unlink,
+};
+
+static const struct config_item_type pci_epc_type = {
+ .ct_item_ops = &pci_epc_item_ops,
+ .ct_attrs = pci_epc_attrs,
+ .ct_owner = THIS_MODULE,
+};
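+
+/*
+ * Example (user space, a sketch): symlinking an EPF directory into an
+ * EPC directory triggers pci_epc_epf_link() and binds the function:
+ *
+ *   ln -s functions/pci_epf_test/func1 controllers/<epc>/
+ *
+ * Removing the symlink invokes pci_epc_epf_unlink(), which unbinds the
+ * function; the controller is expected to be stopped first.
+ */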
+
+struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct config_group *group;
+ struct pci_epc_group *epc_group;
+
+ epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL);
+ if (!epc_group) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ group = &epc_group->group;
+
+ config_group_init_type_name(group, name, &pci_epc_type);
+ ret = configfs_register_group(controllers_group, group);
+ if (ret) {
+ pr_err("failed to register configfs group for %s\n", name);
+ goto err_register_group;
+ }
+
+ epc = pci_epc_get(name);
+ if (IS_ERR(epc)) {
+ ret = PTR_ERR(epc);
+ goto err_epc_get;
+ }
+
+ epc_group->epc = epc;
+
+ return group;
+
+err_epc_get:
+ configfs_unregister_group(group);
+
+err_register_group:
+ kfree(epc_group);
+
+err:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epc_group);
+
+void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+ struct pci_epc_group *epc_group;
+
+ if (!group)
+ return;
+
+ epc_group = container_of(group, struct pci_epc_group, group);
+ pci_epc_put(epc_group->epc);
+ configfs_unregister_group(&epc_group->group);
+ kfree(epc_group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group);
+
+#define PCI_EPF_HEADER_R(_name) \
+static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \
+{ \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ return sysfs_emit(page, "0x%04x\n", epf->header->_name); \
+}
+
+#define PCI_EPF_HEADER_W_u32(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u32 val; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ if (kstrtou32(page, 0, &val) < 0) \
+ return -EINVAL; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+#define PCI_EPF_HEADER_W_u16(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u16 val; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ if (kstrtou16(page, 0, &val) < 0) \
+ return -EINVAL; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+#define PCI_EPF_HEADER_W_u8(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u8 val; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ if (kstrtou8(page, 0, &val) < 0) \
+ return -EINVAL; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+static ssize_t pci_epf_msi_interrupts_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ u8 val;
+
+ if (kstrtou8(page, 0, &val) < 0)
+ return -EINVAL;
+
+ to_pci_epf_group(item)->epf->msi_interrupts = val;
+
+ return len;
+}
+
+static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
+ char *page)
+{
+ return sysfs_emit(page, "%d\n",
+ to_pci_epf_group(item)->epf->msi_interrupts);
+}
+
+static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ u16 val;
+
+ if (kstrtou16(page, 0, &val) < 0)
+ return -EINVAL;
+
+ to_pci_epf_group(item)->epf->msix_interrupts = val;
+
+ return len;
+}
+
+static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
+ char *page)
+{
+ return sysfs_emit(page, "%d\n",
+ to_pci_epf_group(item)->epf->msix_interrupts);
+}
+
+PCI_EPF_HEADER_R(vendorid)
+PCI_EPF_HEADER_W_u16(vendorid)
+
+PCI_EPF_HEADER_R(deviceid)
+PCI_EPF_HEADER_W_u16(deviceid)
+
+PCI_EPF_HEADER_R(revid)
+PCI_EPF_HEADER_W_u8(revid)
+
+PCI_EPF_HEADER_R(progif_code)
+PCI_EPF_HEADER_W_u8(progif_code)
+
+PCI_EPF_HEADER_R(subclass_code)
+PCI_EPF_HEADER_W_u8(subclass_code)
+
+PCI_EPF_HEADER_R(baseclass_code)
+PCI_EPF_HEADER_W_u8(baseclass_code)
+
+PCI_EPF_HEADER_R(cache_line_size)
+PCI_EPF_HEADER_W_u8(cache_line_size)
+
+PCI_EPF_HEADER_R(subsys_vendor_id)
+PCI_EPF_HEADER_W_u16(subsys_vendor_id)
+
+PCI_EPF_HEADER_R(subsys_id)
+PCI_EPF_HEADER_W_u16(subsys_id)
+
+PCI_EPF_HEADER_R(interrupt_pin)
+PCI_EPF_HEADER_W_u8(interrupt_pin)
+
+CONFIGFS_ATTR(pci_epf_, vendorid);
+CONFIGFS_ATTR(pci_epf_, deviceid);
+CONFIGFS_ATTR(pci_epf_, revid);
+CONFIGFS_ATTR(pci_epf_, progif_code);
+CONFIGFS_ATTR(pci_epf_, subclass_code);
+CONFIGFS_ATTR(pci_epf_, baseclass_code);
+CONFIGFS_ATTR(pci_epf_, cache_line_size);
+CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
+CONFIGFS_ATTR(pci_epf_, subsys_id);
+CONFIGFS_ATTR(pci_epf_, interrupt_pin);
+CONFIGFS_ATTR(pci_epf_, msi_interrupts);
+CONFIGFS_ATTR(pci_epf_, msix_interrupts);
+
+static struct configfs_attribute *pci_epf_attrs[] = {
+ &pci_epf_attr_vendorid,
+ &pci_epf_attr_deviceid,
+ &pci_epf_attr_revid,
+ &pci_epf_attr_progif_code,
+ &pci_epf_attr_subclass_code,
+ &pci_epf_attr_baseclass_code,
+ &pci_epf_attr_cache_line_size,
+ &pci_epf_attr_subsys_vendor_id,
+ &pci_epf_attr_subsys_id,
+ &pci_epf_attr_interrupt_pin,
+ &pci_epf_attr_msi_interrupts,
+ &pci_epf_attr_msix_interrupts,
+ NULL,
+};
+
+static int pci_epf_vepf_link(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ return pci_epf_add_vepf(epf_pf, epf_vf);
+}
+
+static void pci_epf_vepf_unlink(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ pci_epf_remove_vepf(epf_pf, epf_vf);
+}
+
+static void pci_epf_release(struct config_item *item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(item);
+
+ mutex_lock(&functions_mutex);
+ idr_remove(&functions_idr, epf_group->index);
+ mutex_unlock(&functions_mutex);
+ pci_epf_destroy(epf_group->epf);
+ kfree(epf_group);
+}
+
+static struct configfs_item_operations pci_epf_ops = {
+ .allow_link = pci_epf_vepf_link,
+ .drop_link = pci_epf_vepf_unlink,
+ .release = pci_epf_release,
+};
+
+static const struct config_item_type pci_epf_type = {
+ .ct_item_ops = &pci_epf_ops,
+ .ct_attrs = pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * pci_epf_type_add_cfs() - Help function drivers to expose function specific
+ * attributes in configfs
+ * @epf: the EPF device that has to be configured using configfs
+ * @group: the parent configfs group (corresponding to entries in
+ * pci_epf_device_id)
+ *
+ * Invoke to expose function specific attributes in configfs.
+ *
+ * Return: A pointer to a config_group structure, NULL if the function driver
+ * has nothing to expose (no user-configurable attributes) or does not
+ * implement the add_cfs() method, or an error pointer if this function is
+ * called for an unbound EPF device or if the EPF driver's add_cfs() method
+ * fails.
+ */
+static struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct config_group *epf_type_group;
+
+ if (!epf->driver) {
+ dev_err(&epf->dev, "epf device not bound to driver\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!epf->driver->ops->add_cfs)
+ return NULL;
+
+ mutex_lock(&epf->lock);
+ epf_type_group = epf->driver->ops->add_cfs(epf, group);
+ mutex_unlock(&epf->lock);
+
+ return epf_type_group;
+}
+
+static void pci_ep_cfs_add_type_group(struct pci_epf_group *epf_group)
+{
+ struct config_group *group;
+
+ group = pci_epf_type_add_cfs(epf_group->epf, &epf_group->group);
+ if (!group)
+ return;
+
+ if (IS_ERR(group)) {
+ dev_err(&epf_group->epf->dev,
+ "failed to create epf type specific attributes\n");
+ return;
+ }
+
+ configfs_register_group(&epf_group->group, group);
+}
+
+static void pci_epf_cfs_work(struct work_struct *work)
+{
+ struct pci_epf_group *epf_group;
+ struct config_group *group;
+
+ epf_group = container_of(work, struct pci_epf_group, cfs_work.work);
+ group = pci_ep_cfs_add_primary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'primary' EPC interface\n");
+ return;
+ }
+
+ group = pci_ep_cfs_add_secondary_group(epf_group);
+ if (IS_ERR(group)) {
+ pr_err("failed to create 'secondary' EPC interface\n");
+ return;
+ }
+
+ pci_ep_cfs_add_type_group(epf_group);
+}
+
+static struct config_group *pci_epf_make(struct config_group *group,
+ const char *name)
+{
+ struct pci_epf_group *epf_group;
+ struct pci_epf *epf;
+ char *epf_name;
+ int index, err;
+
+ epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
+ if (!epf_group)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&functions_mutex);
+ index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
+ mutex_unlock(&functions_mutex);
+ if (index < 0) {
+ err = index;
+ goto free_group;
+ }
+
+ epf_group->index = index;
+
+ config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
+
+ epf_name = kasprintf(GFP_KERNEL, "%s.%d",
+ group->cg_item.ci_name, epf_group->index);
+ if (!epf_name) {
+ err = -ENOMEM;
+ goto remove_idr;
+ }
+
+ epf = pci_epf_create(epf_name);
+ if (IS_ERR(epf)) {
+ pr_err("failed to create endpoint function device\n");
+ err = -EINVAL;
+ goto free_name;
+ }
+
+ epf->group = &epf_group->group;
+ epf_group->epf = epf;
+
+ kfree(epf_name);
+
+ INIT_DELAYED_WORK(&epf_group->cfs_work, pci_epf_cfs_work);
+ queue_delayed_work(system_wq, &epf_group->cfs_work,
+ msecs_to_jiffies(1));
+
+ return &epf_group->group;
+
+free_name:
+ kfree(epf_name);
+
+remove_idr:
+ mutex_lock(&functions_mutex);
+ idr_remove(&functions_idr, epf_group->index);
+ mutex_unlock(&functions_mutex);
+
+free_group:
+ kfree(epf_group);
+
+ return ERR_PTR(err);
+}
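+
+/*
+ * For example, "mkdir functions/pci_epf_test/func1" lands here with name
+ * "func1". The EPF device itself is named "<group>.<index>", e.g.
+ * "pci_epf_test.0" for the first function created, independent of the
+ * directory name chosen by the user.
+ */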
+
+static void pci_epf_drop(struct config_group *group, struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_group_ops = {
+ .make_group = &pci_epf_make,
+ .drop_item = &pci_epf_drop,
+};
+
+static const struct config_item_type pci_epf_group_type = {
+ .ct_group_ops = &pci_epf_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ struct config_group *group;
+
+ group = configfs_register_default_group(functions_group, name,
+ &pci_epf_group_type);
+ if (IS_ERR(group))
+ pr_err("failed to register configfs group for %s function\n",
+ name);
+
+ return group;
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epf_group);
+
+void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+ if (IS_ERR_OR_NULL(group))
+ return;
+
+ configfs_unregister_default_group(group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+
+static const struct config_item_type pci_functions_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static const struct config_item_type pci_controllers_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static const struct config_item_type pci_ep_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem pci_ep_cfs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "pci_ep",
+ .ci_type = &pci_ep_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex),
+};
+
+static int __init pci_ep_cfs_init(void)
+{
+ int ret;
+ struct config_group *root = &pci_ep_cfs_subsys.su_group;
+
+ config_group_init(root);
+
+ ret = configfs_register_subsystem(&pci_ep_cfs_subsys);
+ if (ret) {
+ pr_err("Error %d while registering subsystem %s\n",
+ ret, root->cg_item.ci_namebuf);
+ goto err;
+ }
+
+ functions_group = configfs_register_default_group(root, "functions",
+ &pci_functions_type);
+ if (IS_ERR(functions_group)) {
+ ret = PTR_ERR(functions_group);
+ pr_err("Error %d while registering functions group\n",
+ ret);
+ goto err_functions_group;
+ }
+
+ controllers_group =
+ configfs_register_default_group(root, "controllers",
+ &pci_controllers_type);
+ if (IS_ERR(controllers_group)) {
+ ret = PTR_ERR(controllers_group);
+ pr_err("Error %d while registering controllers group\n",
+ ret);
+ goto err_controllers_group;
+ }
+
+ return 0;
+
+err_controllers_group:
+ configfs_unregister_default_group(functions_group);
+
+err_functions_group:
+ configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+
+err:
+ return ret;
+}
+module_init(pci_ep_cfs_init);
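+
+/*
+ * With configfs mounted at the usual /sys/kernel/config, registering this
+ * subsystem results in the following layout (a sketch; the entries below
+ * "functions" and "controllers" are populated later by EPF drivers and
+ * EPC drivers respectively):
+ *
+ *	/sys/kernel/config/pci_ep/
+ *		controllers/
+ *		functions/
+ */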
+
+static void __exit pci_ep_cfs_exit(void)
+{
+ configfs_unregister_default_group(controllers_group);
+ configfs_unregister_default_group(functions_group);
+ configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+}
+module_exit(pci_ep_cfs_exit);
+
+MODULE_DESCRIPTION("PCI EP CONFIGFS");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
new file mode 100644
index 0000000000..a7d3a92391
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Controller* (EPC) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct class *pci_epc_class;
+
+static void devm_pci_epc_release(struct device *dev, void *res)
+{
+ struct pci_epc *epc = *(struct pci_epc **)res;
+
+ pci_epc_destroy(epc);
+}
+
+static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
+{
+ struct pci_epc **epc = res;
+
+ return *epc == match_data;
+}
+
+/**
+ * pci_epc_put() - release the PCI endpoint controller
+ * @epc: epc returned by pci_epc_get()
+ *
+ * Release the refcount the caller obtained by invoking pci_epc_get().
+ */
+void pci_epc_put(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ module_put(epc->ops->owner);
+ put_device(&epc->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epc_put);
+
+/**
+ * pci_epc_get() - get the PCI endpoint controller
+ * @epc_name: device name of the endpoint controller
+ *
+ * Invoke to get struct pci_epc * corresponding to the device name of the
+ * endpoint controller
+ */
+struct pci_epc *pci_epc_get(const char *epc_name)
+{
+ int ret = -EINVAL;
+ struct pci_epc *epc;
+ struct device *dev;
+ struct class_dev_iter iter;
+
+ class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (strcmp(epc_name, dev_name(dev)))
+ continue;
+
+ epc = to_pci_epc(dev);
+ if (!try_module_get(epc->ops->owner)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ class_dev_iter_exit(&iter);
+ get_device(&epc->dev);
+ return epc;
+ }
+
+err:
+ class_dev_iter_exit(&iter);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get);
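+
+/*
+ * Illustrative sketch (not part of this file): a user of the EPC library
+ * looks up the controller by device name and must drop the reference with
+ * pci_epc_put() when done. The controller name below is hypothetical.
+ *
+ *	struct pci_epc *epc;
+ *
+ *	epc = pci_epc_get("4a000000.pcie-ep");
+ *	if (IS_ERR(epc))
+ *		return PTR_ERR(epc);
+ *	...
+ *	pci_epc_put(epc);
+ */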
+
+/**
+ * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
+ * @epc_features: pci_epc_features structure that holds the reserved BAR bitmap
+ *
+ * Invoke to get the first unreserved BAR that can be used by the endpoint
+ * function. If @epc_features is NULL, BAR_0 is returned.
+ */
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
+{
+ return pci_epc_get_next_free_bar(epc_features, BAR_0);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+
+/**
+ * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
+ * @epc_features: pci_epc_features structure that holds the reserved BAR bitmap
+ * @bar: the starting BAR number from where unreserved BAR should be searched
+ *
+ * Invoke to get the next unreserved BAR starting from @bar that can be used
+ * for the endpoint function. If @epc_features is NULL, BAR_0 is returned.
+ */
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar)
+{
+ unsigned long free_bar;
+
+ if (!epc_features)
+ return BAR_0;
+
+ /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
+ if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ bar++;
+
+ /* Find if the reserved BAR is also a 64-bit BAR */
+ free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
+
+ /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
+ free_bar <<= 1;
+ free_bar |= epc_features->reserved_bar;
+
+ free_bar = find_next_zero_bit(&free_bar, 6, bar);
+ if (free_bar > 5)
+ return NO_BAR;
+
+ return free_bar;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
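+
+/*
+ * Illustrative sketch (not part of this file): walking all unreserved BARs
+ * of a function, assuming epf, epc, func_no and vfunc_no are already known
+ * and the controller reports its features; configure_bar() stands in for
+ * the driver's own BAR setup.
+ *
+ *	const struct pci_epc_features *features;
+ *	enum pci_barno bar;
+ *
+ *	features = pci_epc_get_features(epc, func_no, vfunc_no);
+ *	for (bar = pci_epc_get_first_free_bar(features); bar != NO_BAR;
+ *	     bar = pci_epc_get_next_free_bar(features, bar + 1))
+ *		configure_bar(epf, bar);
+ */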
+
+/**
+ * pci_epc_get_features() - get the features supported by EPC
+ * @epc: the features supported by *this* EPC device will be returned
+ * @func_no: the features supported by the EPC device specific to the
+ * endpoint function with func_no will be returned
+ * @vfunc_no: the features supported by the EPC device specific to the
+ * virtual endpoint function with vfunc_no will be returned
+ *
+ * Invoke to get the features provided by the EPC which may be
+ * specific to an endpoint function. Returns pci_epc_features on success
+ * and NULL for any failures.
+ */
+const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+ u8 func_no, u8 vfunc_no)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return NULL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return NULL;
+
+ if (!epc->ops->get_features)
+ return NULL;
+
+ mutex_lock(&epc->lock);
+ epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
+ mutex_unlock(&epc->lock);
+
+ return epc_features;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_features);
+
+/**
+ * pci_epc_stop() - stop the PCI link
+ * @epc: the EPC device whose link has to be stopped
+ *
+ * Invoke to stop the PCI link
+ */
+void pci_epc_stop(struct pci_epc *epc)
+{
+ if (IS_ERR(epc) || !epc->ops->stop)
+ return;
+
+ mutex_lock(&epc->lock);
+ epc->ops->stop(epc);
+ mutex_unlock(&epc->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_stop);
+
+/**
+ * pci_epc_start() - start the PCI link
+ * @epc: the EPC device whose link has to be started
+ *
+ * Invoke to start the PCI link
+ */
+int pci_epc_start(struct pci_epc *epc)
+{
+ int ret;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->start)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->start(epc);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_start);
+
+/**
+ * pci_epc_raise_irq() - interrupt the host system
+ * @epc: the EPC device which has to interrupt the host
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @type: the type of interrupt: legacy, MSI or MSI-X
+ * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
+ *
+ * Invoke to raise a legacy, MSI or MSI-X interrupt.
+ */
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->raise_irq)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
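+
+/*
+ * Illustrative sketch (not part of this file): raising the first MSI
+ * vector of physical function 0 once the host has enabled MSI.
+ *
+ *	ret = pci_epc_raise_irq(epc, 0, 0, PCI_EPC_IRQ_MSI, 1);
+ *	if (ret)
+ *		dev_err(&epc->dev, "failed to raise MSI\n");
+ */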
+
+/**
+ * pci_epc_map_msi_irq() - Map physical address to MSI address and return
+ * MSI data
+ * @epc: the EPC device which has the MSI capability
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @phys_addr: the physical address of the outbound region
+ * @interrupt_num: the MSI interrupt number with range (1-N)
+ * @entry_size: size of the outbound address region for each interrupt
+ * @msi_data: the data that should be written in order to raise the MSI
+ * interrupt with interrupt number 'interrupt_num'
+ * @msi_addr_offset: Offset of MSI address from the aligned outbound address
+ * to which the MSI address is mapped
+ *
+ * Invoke to map physical address to MSI address and return MSI data. The
+ * physical address should be an address in the outbound region. This is
+ * required to implement the doorbell functionality of NTB, wherein the EPC on
+ * either side of the interface (primary and secondary) can directly write to
+ * the physical address (in the outbound region) of the other interface to
+ * ring its doorbell.
+ */
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+ u32 *msi_data, u32 *msi_addr_offset)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc))
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->map_msi_irq)
+ return -EINVAL;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+ interrupt_num, entry_size, msi_data,
+ msi_addr_offset);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
+
+/**
+ * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
+ * @epc: the EPC device to which the MSI interrupts were requested
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ *
+ * Invoke to get the number of MSI interrupts allocated by the RC
+ */
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ int interrupt;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return 0;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
+ if (!epc->ops->get_msi)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
+ mutex_unlock(&epc->lock);
+
+ if (interrupt < 0)
+ return 0;
+
+ interrupt = 1 << interrupt;
+
+ return interrupt;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msi);
+
+/**
+ * pci_epc_set_msi() - set the number of MSI interrupt numbers required
+ * @epc: the EPC device on which MSI has to be configured
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @interrupts: number of MSI interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI interrupts.
+ */
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
+{
+ int ret;
+ u8 encode_int;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ interrupts < 1 || interrupts > 32)
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->set_msi)
+ return 0;
+
+ encode_int = order_base_2(interrupts);
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msi);
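+
+/*
+ * A worked example of the encoding: pci_epc_set_msi(epc, 0, 0, 8) stores
+ * order_base_2(8) = 3 in the Multiple Message Capable field, and once the
+ * host has configured MSI, pci_epc_get_msi() decodes it back as 1 << 3 = 8.
+ */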
+
+/**
+ * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
+ * @epc: the EPC device to which the MSI-X interrupts were requested
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ *
+ * Invoke to get the number of MSI-X interrupts allocated by the RC
+ */
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ int interrupt;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return 0;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
+ if (!epc->ops->get_msix)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
+ mutex_unlock(&epc->lock);
+
+ if (interrupt < 0)
+ return 0;
+
+ return interrupt + 1;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msix);
+
+/**
+ * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
+ * @epc: the EPC device on which MSI-X has to be configured
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @interrupts: number of MSI-X interrupts required by the EPF
+ * @bir: BAR where the MSI-X table resides
+ * @offset: Offset pointing to the start of MSI-X table
+ *
+ * Invoke to set the required number of MSI-X interrupts.
+ */
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ interrupts < 1 || interrupts > 2048)
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->set_msix)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
+ offset);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msix);
+
+/**
+ * pci_epc_unmap_addr() - unmap CPU address from PCI address
+ * @epc: the EPC device on which address is allocated
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @phys_addr: physical address of the local system
+ *
+ * Invoke to unmap the CPU address from PCI address.
+ */
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr)
+{
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
+ if (!epc->ops->unmap_addr)
+ return;
+
+ mutex_lock(&epc->lock);
+ epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
+ mutex_unlock(&epc->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+
+/**
+ * pci_epc_map_addr() - map CPU address to PCI address
+ * @epc: the EPC device on which address is allocated
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @phys_addr: physical address of the local system
+ * @pci_addr: PCI address to which the physical address should be mapped
+ * @size: the size of the allocation
+ *
+ * Invoke to map CPU address with PCI address.
+ */
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u64 pci_addr, size_t size)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->map_addr)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
+ size);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_addr);
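+
+/*
+ * Illustrative sketch (not part of this file): the usual lifecycle for
+ * accessing host memory at 'pci_addr' from the endpoint; epc, func_no,
+ * vfunc_no, pci_addr, size and the source buffer 'buf' are assumed to be
+ * known.
+ *
+ *	phys_addr_t phys;
+ *	void __iomem *virt;
+ *
+ *	virt = pci_epc_mem_alloc_addr(epc, &phys, size);
+ *	if (!virt)
+ *		return -ENOMEM;
+ *
+ *	ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys, pci_addr, size);
+ *	if (ret)
+ *		goto free_addr;
+ *
+ *	memcpy_toio(virt, buf, size);
+ *
+ *	pci_epc_unmap_addr(epc, func_no, vfunc_no, phys);
+ * free_addr:
+ *	pci_epc_mem_free_addr(epc, phys, virt, size);
+ */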
+
+/**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @epf_bar: the struct epf_bar that contains the BAR information
+ *
+ * Invoke to reset the BAR of the endpoint device.
+ */
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ (epf_bar->barno == BAR_5 &&
+ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ return;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
+ if (!epc->ops->clear_bar)
+ return;
+
+ mutex_lock(&epc->lock);
+ epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
+ mutex_unlock(&epc->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+
+/**
+ * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
+ * @epc: the EPC device on which BAR has to be configured
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @epf_bar: the struct epf_bar that contains the BAR information
+ *
+ * Invoke to configure the BAR of the endpoint device.
+ */
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ int ret;
+ int flags = epf_bar->flags;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
+ (epf_bar->barno == BAR_5 &&
+ flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
+ (flags & PCI_BASE_ADDRESS_SPACE_IO &&
+ flags & PCI_BASE_ADDRESS_IO_MASK) ||
+ (upper_32_bits(epf_bar->size) &&
+ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ if (!epc->ops->set_bar)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_bar);
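+
+/*
+ * Illustrative sketch (not part of this file): a function driver typically
+ * backs a BAR with pci_epf_alloc_space(), which fills in epf->bar[], and
+ * then hands that pci_epf_bar to the controller.
+ *
+ *	void *base;
+ *
+ *	base = pci_epf_alloc_space(epf, SZ_4K, BAR_0, 0, PRIMARY_INTERFACE);
+ *	if (!base)
+ *		return -ENOMEM;
+ *
+ *	ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ *			      &epf->bar[BAR_0]);
+ */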
+
+/**
+ * pci_epc_write_header() - write standard configuration header
+ * @epc: the EPC device to which the configuration header should be written
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @header: standard configuration header fields
+ *
+ * Invoke to write the configuration header to the endpoint controller. Every
+ * endpoint controller will have a dedicated location to which the standard
+ * configuration header would be written. The callback function should write
+ * the header fields to this dedicated location.
+ */
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_header *header)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ /* Only Virtual Function #1 has deviceID */
+ if (vfunc_no > 1)
+ return -EINVAL;
+
+ if (!epc->ops->write_header)
+ return 0;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_write_header);
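+
+/*
+ * Illustrative sketch (not part of this file): writing a minimal header,
+ * typically from the function driver's bind or core_init path; the IDs
+ * below are placeholders, not assigned values.
+ *
+ *	static struct pci_epf_header header = {
+ *		.vendorid	= 0x104c,
+ *		.deviceid	= 0xb500,
+ *		.baseclass_code	= PCI_CLASS_OTHERS,
+ *		.interrupt_pin	= PCI_INTERRUPT_INTA,
+ *	};
+ *
+ *	ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
+ *				   &header);
+ */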
+
+/**
+ * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
+ * @epc: the EPC device to which the endpoint function should be added
+ * @epf: the endpoint function to be added
+ * @type: Identifies if the EPC is connected to the primary or secondary
+ * interface of EPF
+ *
+ * A PCI endpoint device can have one or more functions. In the case of PCIe,
+ * the specification allows up to 8 PCIe endpoint functions. Invoke
+ * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
+ */
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
+{
+ struct list_head *list;
+ u32 func_no;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(epc) || epf->is_vf)
+ return -EINVAL;
+
+ if (type == PRIMARY_INTERFACE && epf->epc)
+ return -EBUSY;
+
+ if (type == SECONDARY_INTERFACE && epf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epc->list_lock);
+ func_no = find_first_zero_bit(&epc->function_num_map,
+ BITS_PER_LONG);
+ if (func_no >= BITS_PER_LONG) {
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (func_no > epc->max_functions - 1) {
+ dev_err(&epc->dev, "Exceeding max supported Function Number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ set_bit(func_no, &epc->function_num_map);
+ if (type == PRIMARY_INTERFACE) {
+ epf->func_no = func_no;
+ epf->epc = epc;
+ list = &epf->list;
+ } else {
+ epf->sec_epc_func_no = func_no;
+ epf->sec_epc = epc;
+ list = &epf->sec_epc_list;
+ }
+
+ list_add_tail(list, &epc->pci_epf);
+ret:
+ mutex_unlock(&epc->list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_add_epf);
+
+/**
+ * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
+ * @epc: the EPC device from which the endpoint function should be removed
+ * @epf: the endpoint function to be removed
+ * @type: identifies if the EPC is connected to the primary or secondary
+ * interface of EPF
+ *
+ * Invoke to remove PCI endpoint function from the endpoint controller.
+ */
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type)
+{
+ struct list_head *list;
+ u32 func_no = 0;
+
+ if (!epc || IS_ERR(epc) || !epf)
+ return;
+
+ if (type == PRIMARY_INTERFACE) {
+ func_no = epf->func_no;
+ list = &epf->list;
+ } else {
+ func_no = epf->sec_epc_func_no;
+ list = &epf->sec_epc_list;
+ }
+
+ mutex_lock(&epc->list_lock);
+ clear_bit(func_no, &epc->function_num_map);
+ list_del(list);
+ epf->epc = NULL;
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
+
+/**
+ * pci_epc_linkup() - Notify the EPF device that the EPC device has
+ * established a connection with the Root Complex.
+ * @epc: the EPC device which has established link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has established a
+ * connection with the Root Complex.
+ */
+void pci_epc_linkup(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->link_up)
+ epf->event_ops->link_up(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkup);
+
+/**
+ * pci_epc_linkdown() - Notify the EPF device that the EPC device has dropped
+ * the connection with the Root Complex.
+ * @epc: the EPC device which has dropped the link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ */
+void pci_epc_linkdown(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->link_down)
+ epf->event_ops->link_down(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkdown);
+
+/**
+ * pci_epc_init_notify() - Notify the EPF device that the EPC device's core
+ * initialization is completed.
+ * @epc: the EPC device whose core initialization is completed
+ *
+ * Invoke to notify the EPF device that the EPC device's initialization
+ * is completed.
+ */
+void pci_epc_init_notify(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->core_init)
+ epf->event_ops->core_init(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_init_notify);
+
+/**
+ * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
+ * the BME event from the Root Complex
+ * @epc: the EPC device that received the BME event
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Bus
+ * Master Enable (BME) event from the Root Complex.
+ */
+void pci_epc_bme_notify(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->bme)
+ epf->event_ops->bme(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+
+/**
+ * pci_epc_destroy() - destroy the EPC device
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the PCI EPC device
+ */
+void pci_epc_destroy(struct pci_epc *epc)
+{
+ pci_ep_cfs_remove_epc_group(epc->group);
+ device_unregister(&epc->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epc_destroy);
+
+/**
+ * devm_pci_epc_destroy() - destroy the EPC device
+ * @dev: device that wants to destroy the EPC
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the devres associated with this
+ * pci_epc and destroy the EPC device.
+ */
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+{
+ int r;
+
+ r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+ epc);
+ dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
+
+static void pci_epc_release(struct device *dev)
+{
+ kfree(to_pci_epc(dev));
+}
+
+/**
+ * __pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to pci_epc class.
+ */
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner)
+{
+ int ret;
+ struct pci_epc *epc;
+
+ if (WARN_ON(!dev)) {
+ ret = -EINVAL;
+ goto err_ret;
+ }
+
+ epc = kzalloc(sizeof(*epc), GFP_KERNEL);
+ if (!epc) {
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+
+ mutex_init(&epc->lock);
+ mutex_init(&epc->list_lock);
+ INIT_LIST_HEAD(&epc->pci_epf);
+
+ device_initialize(&epc->dev);
+ epc->dev.class = pci_epc_class;
+ epc->dev.parent = dev;
+ epc->dev.release = pci_epc_release;
+ epc->ops = ops;
+
+ ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
+ if (ret)
+ goto put_dev;
+
+ ret = device_add(&epc->dev);
+ if (ret)
+ goto put_dev;
+
+ epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
+
+ return epc;
+
+put_dev:
+ put_device(&epc->dev);
+
+err_ret:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__pci_epc_create);
+
+/**
+ * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to pci_epc class.
+ * While at it, it also associates the device with the pci_epc using devres.
+ * On driver detach, the release function is invoked on the devres data and
+ * then the devres data is freed.
+ */
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner)
+{
+ struct pci_epc **ptr, *epc;
+
+ ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ epc = __pci_epc_create(dev, ops, owner);
+ if (!IS_ERR(epc)) {
+ *ptr = epc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return epc;
+}
+EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
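+
+/*
+ * Illustrative sketch (not part of this file): an EPC driver's probe
+ * usually pairs the managed create with an outbound window setup;
+ * my_epc_ops, res and window_size are assumed to come from the driver.
+ *
+ *	epc = devm_pci_epc_create(dev, &my_epc_ops);
+ *	if (IS_ERR(epc))
+ *		return PTR_ERR(epc);
+ *
+ *	ret = pci_epc_mem_init(epc, res->start, window_size, PAGE_SIZE);
+ *	if (ret)
+ *		return ret;
+ */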
+
+static int __init pci_epc_init(void)
+{
+ pci_epc_class = class_create("pci_epc");
+ if (IS_ERR(pci_epc_class)) {
+ pr_err("failed to create pci epc class --> %ld\n",
+ PTR_ERR(pci_epc_class));
+ return PTR_ERR(pci_epc_class);
+ }
+
+ return 0;
+}
+module_init(pci_epc_init);
+
+static void __exit pci_epc_exit(void)
+{
+ class_destroy(pci_epc_class);
+}
+module_exit(pci_epc_exit);
+
+MODULE_DESCRIPTION("PCI EPC Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
new file mode 100644
index 0000000000..a9c028f58d
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Controller* Address Space Management
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+
+/**
+ * pci_epc_mem_get_order() - determine the allocation order of a memory size
+ * @mem: address space of the endpoint controller
+ * @size: the size for which to get the order
+ *
+ * Reimplement get_order() for mem->page_size, since the generic get_order()
+ * always computes the order relative to the constant PAGE_SIZE.
+ */
+static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
+{
+ int order;
+ unsigned int page_shift = ilog2(mem->window.page_size);
+
+ size--;
+ size >>= page_shift;
+#if BITS_PER_LONG == 32
+ order = fls(size);
+#else
+ order = fls64(size);
+#endif
+ return order;
+}
+
+/**
+ * pci_epc_multi_mem_init() - initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_multi_mem_init
+ * @windows: pointer to the windows supported by the device
+ * @num_windows: number of windows the device supports
+ *
+ * Invoke to initialize the pci_epc_mem structures used by the
+ * endpoint functions to allocate mapped PCI address space.
+ */
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *windows,
+ unsigned int num_windows)
+{
+ struct pci_epc_mem *mem = NULL;
+ unsigned long *bitmap = NULL;
+ unsigned int page_shift;
+ size_t page_size;
+ int bitmap_size;
+ int pages;
+ int ret;
+ int i;
+
+ epc->num_windows = 0;
+
+ if (!windows || !num_windows)
+ return -EINVAL;
+
+ epc->windows = kcalloc(num_windows, sizeof(*epc->windows), GFP_KERNEL);
+ if (!epc->windows)
+ return -ENOMEM;
+
+ for (i = 0; i < num_windows; i++) {
+ page_size = windows[i].page_size;
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+ page_shift = ilog2(page_size);
+ pages = windows[i].size >> page_shift;
+ bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ ret = -ENOMEM;
+ i--;
+ goto err_mem;
+ }
+
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap) {
+ ret = -ENOMEM;
+ kfree(mem);
+ i--;
+ goto err_mem;
+ }
+
+ mem->window.phys_base = windows[i].phys_base;
+ mem->window.size = windows[i].size;
+ mem->window.page_size = page_size;
+ mem->bitmap = bitmap;
+ mem->pages = pages;
+ mutex_init(&mem->lock);
+ epc->windows[i] = mem;
+ }
+
+ epc->mem = epc->windows[0];
+ epc->num_windows = num_windows;
+
+ return 0;
+
+err_mem:
+ for (; i >= 0; i--) {
+ mem = epc->windows[i];
+ kfree(mem->bitmap);
+ kfree(mem);
+ }
+ kfree(epc->windows);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
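+
+/*
+ * Illustrative sketch (not part of this file): a controller exposing two
+ * outbound windows; the addresses and sizes are made up.
+ *
+ *	static struct pci_epc_mem_window windows[] = {
+ *		{ .phys_base = 0x10000000, .size = SZ_64K, .page_size = SZ_4K },
+ *		{ .phys_base = 0x20000000, .size = SZ_1M, .page_size = SZ_64K },
+ *	};
+ *
+ *	ret = pci_epc_multi_mem_init(epc, windows, ARRAY_SIZE(windows));
+ */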
+
+/**
+ * pci_epc_mem_init() - Initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_init
+ * @base: Physical address of the window region
+ * @size: Total size of the window region
+ * @page_size: Page size of the window region
+ *
+ * Invoke to initialize a single pci_epc_mem structure used by the
+ * endpoint functions to allocate memory for mapping the PCI host memory
+ */
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size)
+{
+ struct pci_epc_mem_window mem_window;
+
+ mem_window.phys_base = base;
+ mem_window.size = size;
+ mem_window.page_size = page_size;
+
+ return pci_epc_multi_mem_init(epc, &mem_window, 1);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_init);
+
+/**
+ * pci_epc_mem_exit() - cleanup the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_exit
+ *
+ * Invoke to cleanup the pci_epc_mem structure allocated in
+ * pci_epc_mem_init().
+ */
+void pci_epc_mem_exit(struct pci_epc *epc)
+{
+ struct pci_epc_mem *mem;
+ int i;
+
+ if (!epc->num_windows)
+ return;
+
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+ kfree(mem->bitmap);
+ kfree(mem);
+ }
+ kfree(epc->windows);
+
+ epc->windows = NULL;
+ epc->mem = NULL;
+ epc->num_windows = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
+
+/**
+ * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space
+ * @epc: the EPC device on which memory has to be allocated
+ * @phys_addr: populate the allocated physical address here
+ * @size: the size of the address space that has to be allocated
+ *
+ * Invoke to allocate memory address from the EPC address space. This
+ * is usually done to map the remote RC address into the local system.
+ */
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size)
+{
+ void __iomem *virt_addr = NULL;
+ struct pci_epc_mem *mem;
+ unsigned int page_shift;
+ size_t align_size;
+ int pageno;
+ int order;
+ int i;
+
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+ mutex_lock(&mem->lock);
+ align_size = ALIGN(size, mem->window.page_size);
+ order = pci_epc_mem_get_order(mem, align_size);
+
+ pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
+ order);
+ if (pageno >= 0) {
+ page_shift = ilog2(mem->window.page_size);
+ *phys_addr = mem->window.phys_base +
+ ((phys_addr_t)pageno << page_shift);
+ virt_addr = ioremap(*phys_addr, align_size);
+ if (!virt_addr) {
+ bitmap_release_region(mem->bitmap,
+ pageno, order);
+ mutex_unlock(&mem->lock);
+ continue;
+ }
+ mutex_unlock(&mem->lock);
+ return virt_addr;
+ }
+ mutex_unlock(&mem->lock);
+ }
+
+ return virt_addr;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
+
+static struct pci_epc_mem *pci_epc_get_matching_window(struct pci_epc *epc,
+ phys_addr_t phys_addr)
+{
+ struct pci_epc_mem *mem;
+ int i;
+
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+
+ if (phys_addr >= mem->window.phys_base &&
+ phys_addr < (mem->window.phys_base + mem->window.size))
+ return mem;
+ }
+
+ return NULL;
+}
+
+/**
+ * pci_epc_mem_free_addr() - free the allocated memory address
+ * @epc: the EPC device on which memory was allocated
+ * @phys_addr: the allocated physical address
+ * @virt_addr: virtual address of the allocated mem space
+ * @size: the size of the allocated address space
+ *
+ * Invoke to free the memory allocated using pci_epc_mem_alloc_addr().
+ */
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size)
+{
+ struct pci_epc_mem *mem;
+ unsigned int page_shift;
+ size_t page_size;
+ int pageno;
+ int order;
+
+ mem = pci_epc_get_matching_window(epc, phys_addr);
+ if (!mem) {
+ pr_err("failed to get matching window\n");
+ return;
+ }
+
+ page_size = mem->window.page_size;
+ page_shift = ilog2(page_size);
+ iounmap(virt_addr);
+ pageno = (phys_addr - mem->window.phys_base) >> page_shift;
+ size = ALIGN(size, page_size);
+ order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
+ bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
+
+MODULE_DESCRIPTION("PCI EPC Address Space Management");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
new file mode 100644
index 0000000000..2c32de6679
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static DEFINE_MUTEX(pci_epf_mutex);
+
+static struct bus_type pci_epf_bus_type;
+static const struct device_type pci_epf_type;
+
+/**
+ * pci_epf_unbind() - Notify the function driver that the binding between the
+ * EPF device and the EPC device has been lost
+ * @epf: the EPF device which has lost the binding with the EPC device
+ *
+ * Invoke to notify the function driver that the binding between the EPF device
+ * and the EPC device has been lost.
+ */
+void pci_epf_unbind(struct pci_epf *epf)
+{
+ struct pci_epf *epf_vf;
+
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return;
+ }
+
+ mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ if (epf_vf->is_bound)
+ epf_vf->driver->ops->unbind(epf_vf);
+ }
+ if (epf->is_bound)
+ epf->driver->ops->unbind(epf);
+ mutex_unlock(&epf->lock);
+ module_put(epf->driver->owner);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unbind);
+
+/**
+ * pci_epf_bind() - Notify the function driver that the EPF device has been
+ * bound to an EPC device
+ * @epf: the EPF device which has been bound to the EPC device
+ *
+ * Invoke to notify the function driver that it has been bound to an EPC device.
+ */
+int pci_epf_bind(struct pci_epf *epf)
+{
+ struct device *dev = &epf->dev;
+ struct pci_epf *epf_vf;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
+ int ret;
+
+ if (!epf->driver) {
+ dev_WARN(dev, "epf device not bound to driver\n");
+ return -EINVAL;
+ }
+
+ if (!try_module_get(epf->driver->owner))
+ return -EAGAIN;
+
+ mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ vfunc_no = epf_vf->vfunc_no;
+
+ if (vfunc_no < 1) {
+ dev_err(dev, "Invalid virtual function number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ epc = epf->epc;
+ func_no = epf->func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epc = epf->sec_epc;
+ func_no = epf->sec_epc_func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epf_vf->func_no = epf->func_no;
+ epf_vf->sec_epc_func_no = epf->sec_epc_func_no;
+ epf_vf->epc = epf->epc;
+ epf_vf->sec_epc = epf->sec_epc;
+ ret = epf_vf->driver->ops->bind(epf_vf);
+ if (ret)
+ goto ret;
+ epf_vf->is_bound = true;
+ }
+
+ ret = epf->driver->ops->bind(epf);
+ if (ret)
+ goto ret;
+ epf->is_bound = true;
+
+ mutex_unlock(&epf->lock);
+ return 0;
+
+ret:
+ mutex_unlock(&epf->lock);
+ pci_epf_unbind(epf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epf_bind);
+
+/**
+ * pci_epf_add_vepf() - associate virtual EP function to physical EP function
+ * @epf_pf: the physical EP function to which the virtual EP function should be
+ * associated
+ * @epf_vf: the virtual EP function to be added
+ *
+ * A physical endpoint function can be associated with multiple virtual
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
+ * function to a physical PCI endpoint function.
+ */
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ u32 vfunc_no;
+
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return -EINVAL;
+
+ if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf)
+ return -EBUSY;
+
+ if (epf_pf->sec_epc || epf_vf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epf_pf->lock);
+ vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map,
+ BITS_PER_LONG);
+ if (vfunc_no >= BITS_PER_LONG) {
+ mutex_unlock(&epf_pf->lock);
+ return -EINVAL;
+ }
+
+ set_bit(vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->vfunc_no = vfunc_no;
+
+ epf_vf->epf_pf = epf_pf;
+ epf_vf->is_vf = true;
+
+ list_add_tail(&epf_vf->list, &epf_pf->pci_vepf);
+ mutex_unlock(&epf_pf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
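+
+/*
+ * Illustrative sketch (not part of this file): associating a virtual
+ * function with a physical function before either is bound to a
+ * controller, and undoing the association later.
+ *
+ *	ret = pci_epf_add_vepf(epf_pf, epf_vf);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	pci_epf_remove_vepf(epf_pf, epf_vf);
+ */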
+
+/**
+ * pci_epf_remove_vepf() - remove virtual EP function from physical EP function
+ * @epf_pf: the physical EP function from which the virtual EP function should
+ * be removed
+ * @epf_vf: the virtual EP function to be removed
+ *
+ * Invoke to remove a virtual endpoint function from the physical endpoint
+ * function.
+ */
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return;
+
+ mutex_lock(&epf_pf->lock);
+ clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ list_del(&epf_vf->list);
+ mutex_unlock(&epf_pf->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+
+/**
+ * pci_epf_free_space() - free the allocated PCI EPF register space
+ * @epf: the EPF device from which the memory has to be freed
+ * @addr: the virtual address of the PCI EPF register space
+ * @bar: the BAR number corresponding to the register space
+ * @type: Identifies if the allocated space is for primary EPC or secondary EPC
+ *
+ * Invoke to free the allocated PCI EPF register space.
+ */
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type)
+{
+ struct device *dev;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc;
+
+ if (!addr)
+ return;
+
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ dev = epc->dev.parent;
+ dma_free_coherent(dev, epf_bar[bar].size, addr,
+ epf_bar[bar].phys_addr);
+
+ epf_bar[bar].phys_addr = 0;
+ epf_bar[bar].addr = NULL;
+ epf_bar[bar].size = 0;
+ epf_bar[bar].barno = 0;
+ epf_bar[bar].flags = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+/**
+ * pci_epf_alloc_space() - allocate memory for the PCI EPF register space
+ * @epf: the EPF device for which the memory has to be allocated
+ * @size: the size of the memory that has to be allocated
+ * @bar: the BAR number corresponding to the allocated register space
+ * @align: alignment size for the allocation region
+ * @type: Identifies if the allocation is for primary EPC or secondary EPC
+ *
+ * Invoke to allocate memory for the PCI EPF register space.
+ */
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align, enum pci_epc_interface_type type)
+{
+ struct pci_epf_bar *epf_bar;
+ dma_addr_t phys_addr;
+ struct pci_epc *epc;
+ struct device *dev;
+ void *space;
+
+ if (size < 128)
+ size = 128;
+
+ if (align)
+ size = ALIGN(size, align);
+ else
+ size = roundup_pow_of_two(size);
+
+ if (type == PRIMARY_INTERFACE) {
+ epc = epf->epc;
+ epf_bar = epf->bar;
+ } else {
+ epc = epf->sec_epc;
+ epf_bar = epf->sec_epc_bar;
+ }
+
+ dev = epc->dev.parent;
+ space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!space) {
+ dev_err(dev, "failed to allocate mem space\n");
+ return NULL;
+ }
+
+ epf_bar[bar].phys_addr = phys_addr;
+ epf_bar[bar].addr = space;
+ epf_bar[bar].size = size;
+ epf_bar[bar].barno = bar;
+ epf_bar[bar].flags |= upper_32_bits(size) ?
+ PCI_BASE_ADDRESS_MEM_TYPE_64 :
+ PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ return space;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
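+
+/*
+ * Illustrative sketch (not part of this file): honouring the controller's
+ * BAR alignment when allocating, and releasing the space again on unbind.
+ *
+ *	features = pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no);
+ *	base = pci_epf_alloc_space(epf, SZ_64K, BAR_2,
+ *				   features ? features->align : 0,
+ *				   PRIMARY_INTERFACE);
+ *	...
+ *	pci_epf_free_space(epf, base, BAR_2, PRIMARY_INTERFACE);
+ */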
+
+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+ struct config_group *group, *tmp;
+
+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+ return;
+
+ mutex_lock(&pci_epf_mutex);
+ list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+ pci_ep_cfs_remove_epf_group(group);
+ list_del(&driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+}
+
+/**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+ *
+ * Invoke to unregister the PCI EPF driver.
+ */
+void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+{
+ pci_epf_remove_cfs(driver);
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+ struct config_group *group;
+ const struct pci_epf_device_id *id;
+
+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+ return 0;
+
+ INIT_LIST_HEAD(&driver->epf_group);
+
+ id = driver->id_table;
+ while (id->name[0]) {
+ group = pci_ep_cfs_add_epf_group(id->name);
+ if (IS_ERR(group)) {
+ pci_epf_remove_cfs(driver);
+ return PTR_ERR(group);
+ }
+
+ mutex_lock(&pci_epf_mutex);
+ list_add_tail(&group->group_entry, &driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+ id++;
+ }
+
+ return 0;
+}
+
+/**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing PCI EPF driver
+ * @owner: the owner of the module that registers the PCI EPF driver
+ *
+ * Invoke to register a new PCI EPF driver.
+ */
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner)
+{
+ int ret;
+
+ if (!driver->ops)
+ return -EINVAL;
+
+ if (!driver->ops->bind || !driver->ops->unbind)
+ return -EINVAL;
+
+ driver->driver.bus = &pci_epf_bus_type;
+ driver->driver.owner = owner;
+
+ ret = driver_register(&driver->driver);
+ if (ret)
+ return ret;
+
+ pci_epf_add_cfs(driver);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_epf_register_driver);
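+
+/*
+ * Illustrative sketch (not part of this file): the minimal shape of an EPF
+ * driver; "pci_epf_foo" and the foo_* callbacks are hypothetical.
+ *
+ *	static const struct pci_epf_device_id foo_ids[] = {
+ *		{ .name = "pci_epf_foo" },
+ *		{ }
+ *	};
+ *
+ *	static const struct pci_epf_ops foo_ops = {
+ *		.bind	= foo_bind,
+ *		.unbind	= foo_unbind,
+ *	};
+ *
+ *	static struct pci_epf_driver foo_driver = {
+ *		.driver.name	= "pci_epf_foo",
+ *		.probe		= foo_probe,
+ *		.id_table	= foo_ids,
+ *		.ops		= &foo_ops,
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	ret = pci_epf_register_driver(&foo_driver);
+ */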
+
+/**
+ * pci_epf_destroy() - destroy the created PCI EPF device
+ * @epf: the PCI EPF device that has to be destroyed.
+ *
+ * Invoke to destroy the PCI EPF device created by invoking pci_epf_create().
+ */
+void pci_epf_destroy(struct pci_epf *epf)
+{
+ device_unregister(&epf->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epf_destroy);
+
+/**
+ * pci_epf_create() - create a new PCI EPF device
+ * @name: the name of the PCI EPF device. This name will be used to bind the
+ * EPF device to an EPF driver
+ *
+ * Invoke to create a new PCI EPF device by providing the name of the function
+ * device.
+ */
+struct pci_epf *pci_epf_create(const char *name)
+{
+ int ret;
+ struct pci_epf *epf;
+ struct device *dev;
+ int len;
+
+ epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+ if (!epf)
+ return ERR_PTR(-ENOMEM);
+
+ len = strchrnul(name, '.') - name;
+ epf->name = kstrndup(name, len, GFP_KERNEL);
+ if (!epf->name) {
+ kfree(epf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* VFs are numbered starting with 1. So set BIT(0) by default */
+ epf->vfunction_num_map = 1;
+ INIT_LIST_HEAD(&epf->pci_vepf);
+
+ dev = &epf->dev;
+ device_initialize(dev);
+ dev->bus = &pci_epf_bus_type;
+ dev->type = &pci_epf_type;
+ mutex_init(&epf->lock);
+
+ ret = dev_set_name(dev, "%s", name);
+ if (ret) {
+ put_device(dev);
+ return ERR_PTR(ret);
+ }
+
+ ret = device_add(dev);
+ if (ret) {
+ put_device(dev);
+ return ERR_PTR(ret);
+ }
+
+ return epf;
+}
+EXPORT_SYMBOL_GPL(pci_epf_create);
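+
+/*
+ * Naming example: pci_epf_create("pci_epf_test.0") creates a device named
+ * "pci_epf_test.0" whose epf->name is "pci_epf_test" (everything before the
+ * first '.'); it is epf->name that the EPF bus matches against driver names
+ * and id tables.
+ */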
+
+static void pci_epf_dev_release(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+
+ kfree(epf->name);
+ kfree(epf);
+}
+
+static const struct device_type pci_epf_type = {
+ .release = pci_epf_dev_release,
+};
+
+static const struct pci_epf_device_id *
+pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+{
+ while (id->name[0]) {
+ if (strcmp(epf->name, id->name) == 0)
+ return id;
+ id++;
+ }
+
+ return NULL;
+}
+
+static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+
+ if (driver->id_table)
+ return !!pci_epf_match_id(driver->id_table, epf);
+
+ return !strcmp(epf->name, drv->name);
+}
+
+static int pci_epf_device_probe(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+ if (!driver->probe)
+ return -ENODEV;
+
+ epf->driver = driver;
+
+ return driver->probe(epf, pci_epf_match_id(driver->id_table, epf));
+}
+
+static void pci_epf_device_remove(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+ if (driver->remove)
+ driver->remove(epf);
+ epf->driver = NULL;
+}
+
+static struct bus_type pci_epf_bus_type = {
+ .name = "pci-epf",
+ .match = pci_epf_device_match,
+ .probe = pci_epf_device_probe,
+ .remove = pci_epf_device_remove,
+};
+
+static int __init pci_epf_init(void)
+{
+ int ret;
+
+ ret = bus_register(&pci_epf_bus_type);
+ if (ret) {
+ pr_err("failed to register pci epf bus --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_init);
+
+static void __exit pci_epf_exit(void)
+{
+ bus_unregister(&pci_epf_bus_type);
+}
+module_exit(pci_epf_exit);
+
+MODULE_DESCRIPTION("PCI EPF Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");