Diffstat (limited to 'drivers/pci/endpoint/functions')
-rw-r--r--   drivers/pci/endpoint/functions/Kconfig          |   39
-rw-r--r--   drivers/pci/endpoint/functions/Makefile         |    8
-rw-r--r--   drivers/pci/endpoint/functions/pci-epf-ntb.c    | 2147
-rw-r--r--   drivers/pci/endpoint/functions/pci-epf-test.c   | 1078
-rw-r--r--   drivers/pci/endpoint/functions/pci-epf-vntb.c   | 1468
5 files changed, 4740 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig new file mode 100644 index 000000000..8efb6a869 --- /dev/null +++ b/drivers/pci/endpoint/functions/Kconfig @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# PCI Endpoint Functions +# + +config PCI_EPF_TEST + tristate "PCI Endpoint Test driver" + depends on PCI_ENDPOINT + select CRC32 + help + Enable this configuration option to enable the test driver + for PCI Endpoint. + + If in doubt, say "N" to disable Endpoint test driver. + +config PCI_EPF_NTB + tristate "PCI Endpoint NTB driver" + depends on PCI_ENDPOINT + select CONFIGFS_FS + help + Select this configuration option to enable the Non-Transparent + Bridge (NTB) driver for PCI Endpoint. NTB driver implements NTB + controller functionality using multiple PCIe endpoint instances. + It can support NTB endpoint function devices created using + device tree. + + If in doubt, say "N" to disable Endpoint NTB driver. + +config PCI_EPF_VNTB + tristate "PCI Endpoint Virtual NTB driver" + depends on PCI_ENDPOINT + depends on NTB + select CONFIGFS_FS + help + Select this configuration option to enable the Non-Transparent + Bridge (NTB) driver for PCIe Endpoint. NTB driver implements NTB + between PCI Root Port and PCIe Endpoint. + + If in doubt, say "N" to disable Endpoint NTB driver. diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile new file mode 100644 index 000000000..5c13001de --- /dev/null +++ b/drivers/pci/endpoint/functions/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for PCI Endpoint Functions +# + +obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o +obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o +obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c new file mode 100644 index 000000000..9a00448c7 --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c @@ -0,0 +1,2147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Endpoint Function Driver to implement Non-Transparent Bridge functionality + * + * Copyright (C) 2020 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +/* + * The PCI NTB function driver configures the SoC with multiple PCIe Endpoint + * (EP) controller instances (see diagram below) in such a way that + * transactions from one EP controller are routed to the other EP controller. + * Once PCI NTB function driver configures the SoC with multiple EP instances, + * HOST1 and HOST2 can communicate with each other using SoC as a bridge. 
+ * + * +-------------+ +-------------+ + * | | | | + * | HOST1 | | HOST2 | + * | | | | + * +------^------+ +------^------+ + * | | + * | | + * +---------|-------------------------------------------------|---------+ + * | +------v------+ +------v------+ | + * | | | | | | + * | | EP | | EP | | + * | | CONTROLLER1 | | CONTROLLER2 | | + * | | <-----------------------------------> | | + * | | | | | | + * | | | | | | + * | | | SoC With Multiple EP Instances | | | + * | | | (Configured using NTB Function) | | | + * | +-------------+ +-------------+ | + * +---------------------------------------------------------------------+ + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> + +static struct workqueue_struct *kpcintb_workqueue; + +#define COMMAND_CONFIGURE_DOORBELL 1 +#define COMMAND_TEARDOWN_DOORBELL 2 +#define COMMAND_CONFIGURE_MW 3 +#define COMMAND_TEARDOWN_MW 4 +#define COMMAND_LINK_UP 5 +#define COMMAND_LINK_DOWN 6 + +#define COMMAND_STATUS_OK 1 +#define COMMAND_STATUS_ERROR 2 + +#define LINK_STATUS_UP BIT(0) + +#define SPAD_COUNT 64 +#define DB_COUNT 4 +#define NTB_MW_OFFSET 2 +#define DB_COUNT_MASK GENMASK(15, 0) +#define MSIX_ENABLE BIT(16) +#define MAX_DB_COUNT 32 +#define MAX_MW 4 + +enum epf_ntb_bar { + BAR_CONFIG, + BAR_PEER_SPAD, + BAR_DB_MW1, + BAR_MW2, + BAR_MW3, + BAR_MW4, +}; + +struct epf_ntb { + u32 num_mws; + u32 db_count; + u32 spad_count; + struct pci_epf *epf; + u64 mws_size[MAX_MW]; + struct config_group group; + struct epf_ntb_epc *epc[2]; +}; + +#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group) + +struct epf_ntb_epc { + u8 func_no; + u8 vfunc_no; + bool linkup; + bool is_msix; + int msix_bar; + u32 spad_size; + struct pci_epc *epc; + struct epf_ntb *epf_ntb; + void __iomem *mw_addr[6]; + size_t msix_table_offset; + struct epf_ntb_ctrl *reg; + struct pci_epf_bar *epf_bar; + enum pci_barno epf_ntb_bar[6]; + struct delayed_work cmd_handler; + enum pci_epc_interface_type type; + const struct pci_epc_features *epc_features; +}; + +struct epf_ntb_ctrl { + u32 command; + u32 argument; + u16 command_status; + u16 link_status; + u32 topology; + u64 addr; + u64 size; + u32 num_mws; + u32 mw1_offset; + u32 spad_offset; + u32 spad_count; + u32 db_entry_size; + u32 db_data[MAX_DB_COUNT]; + u32 db_offset[MAX_DB_COUNT]; +} __packed; + +static struct pci_epf_header epf_ntb_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .baseclass_code = PCI_BASE_CLASS_MEMORY, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +/** + * epf_ntb_link_up() - Raise link_up interrupt to both the hosts + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @link_up: true or false indicating Link is UP or Down + * + * Once NTB function in HOST1 and the NTB function in HOST2 invoke + * ntb_link_enable(), this NTB function driver will trigger a link event to + * the NTB client in both the hosts. 
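For illustration, a minimal user-space mock of what an NTB client observes when this link event is signalled. Local memory stands in for the mapped config BAR, and only the leading fields of epf_ntb_ctrl are modeled; nothing here is part of the patch itself.

#include <stdint.h>
#include <stdio.h>

#define LINK_STATUS_UP	(1 << 0)

/* Leading fields of the epf_ntb_ctrl layout from this patch. */
struct ntb_ctrl {
	uint32_t command;
	uint32_t argument;
	uint16_t command_status;
	uint16_t link_status;
};

int main(void)
{
	struct ntb_ctrl ctrl = { 0 };

	/* epf_ntb_link_up() sets this bit before raising the IRQ. */
	ctrl.link_status |= LINK_STATUS_UP;

	printf("link is %s\n",
	       (ctrl.link_status & LINK_STATUS_UP) ? "up" : "down");
	return 0;
}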
+ */
+static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
+{
+	enum pci_epc_interface_type type;
+	enum pci_epc_irq_type irq_type;
+	struct epf_ntb_epc *ntb_epc;
+	struct epf_ntb_ctrl *ctrl;
+	struct pci_epc *epc;
+	u8 func_no, vfunc_no;
+	bool is_msix;
+	int ret;
+
+	for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+		ntb_epc = ntb->epc[type];
+		epc = ntb_epc->epc;
+		func_no = ntb_epc->func_no;
+		vfunc_no = ntb_epc->vfunc_no;
+		is_msix = ntb_epc->is_msix;
+		ctrl = ntb_epc->reg;
+		if (link_up)
+			ctrl->link_status |= LINK_STATUS_UP;
+		else
+			ctrl->link_status &= ~LINK_STATUS_UP;
+		irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+		ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
+		if (ret) {
+			dev_err(&epc->dev,
+				"%s intf: Failed to raise Link Up IRQ\n",
+				pci_epc_interface_string(type));
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * epf_ntb_configure_mw() - Configure the Outbound Address Space for one host
+ * to access the memory window of other host
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * +-----------------+    +---->+----------------+-----------+-----------------+
+ * |       BAR0      |    |     |   Doorbell 1   +-----------> MSI|X ADDRESS 1 |
+ * +-----------------+    |     +----------------+           +-----------------+
+ * |       BAR1      |    |     |   Doorbell 2   +---------+ |                 |
+ * +-----------------+----+     +----------------+         | |                 |
+ * |       BAR2      |          |   Doorbell 3   +-------+ | +-----------------+
+ * +-----------------+----+     +----------------+       | +-> MSI|X ADDRESS 2 |
+ * |       BAR3      |    |     |   Doorbell 4   +-----+ |   +-----------------+
+ * +-----------------+    |     |----------------+     | |   |                 |
+ * |       BAR4      |    |     |                |     | |   +-----------------+
+ * +-----------------+    |     |      MW1       +---+ | +-->+ MSI|X ADDRESS 3||
+ * |       BAR5      |    |     |                |   | |     +-----------------+
+ * +-----------------+    +---->-----------------+   | |     |                 |
+ *   EP CONTROLLER 1            |                |   | |     +-----------------+
+ *                              |                |   | +---->+ MSI|X ADDRESS 4 |
+ *                              +----------------+   |       +-----------------+
+ *                              (A)  EP CONTROLLER 2 |       |                 |
+ *                                    (OB SPACE)     |       |                 |
+ *                                                   +------->      MW1       |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                   (B)     +-----------------+
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           +-----------------+
+ *                                                           PCI Address Space
+ *                                                           (Managed by HOST2)
+ *
+ * This function performs stage (B) in the above diagram (see MW1) i.e., map OB
+ * address space of memory window to PCI address space.
+ *
+ * This operation requires 3 parameters
+ * 1) Address in the outbound address space
+ * 2) Address in the PCI Address space
+ * 3) Size of the address region to be mapped
+ *
+ * The address in the outbound address space (for MW1, MW2, MW3 and MW4) is
+ * stored in epf_bar corresponding to BAR_DB_MW1 for MW1 and BAR_MW2, BAR_MW3,
+ * BAR_MW4 for the rest of the BARs of epf_ntb_epc that is connected to HOST1.
+ * This is populated in epf_ntb_alloc_peer_mem() in this driver.
+ *
+ * The address and size of the PCI address region that has to be mapped would
+ * be provided by HOST2 in ctrl->addr and ctrl->size of epf_ntb_epc that is
+ * connected to HOST2.
+ *
+ * Please note Memory window1 (MW1) and Doorbell registers together will be
+ * mapped to a single BAR (BAR2) above for 32-bit BARs. The exact BAR that's
+ * used for Memory window (MW) can be obtained from epf_ntb_bar[BAR_DB_MW1],
+ * epf_ntb_bar[BAR_MW2], epf_ntb_bar[BAR_MW3], epf_ntb_bar[BAR_MW4].
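To make the three parameters above concrete, here is a small sketch of the address arithmetic mirroring epf_ntb_configure_mw(): MW1 shares its BAR with the doorbell region, so its outbound address starts mw1_offset bytes in. The numeric inputs in main() are example values only, not taken from real hardware.

#include <stdint.h>
#include <stdio.h>

#define NTB_MW_OFFSET	2
#define BAR_DB_MW1	2	/* enum epf_ntb_bar value from this patch */

static int mw_ob_addr(uint64_t bar_phys, uint64_t mw1_offset,
		      uint64_t req_size, uint64_t mw_size,
		      uint32_t mw, uint64_t *ob_addr)
{
	if (req_size > mw_size)
		return -1;		/* -EINVAL in the driver */

	*ob_addr = bar_phys;
	if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
		*ob_addr += mw1_offset;	/* MW1: skip the doorbell region */
	return 0;
}

int main(void)
{
	uint64_t ob;

	if (!mw_ob_addr(0x10000000, 0x1000, 0x8000, 0x100000, 0, &ob))
		printf("MW1 outbound region starts at 0x%llx\n",
		       (unsigned long long)ob);
	return 0;
}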
+ */
+static int epf_ntb_configure_mw(struct epf_ntb *ntb,
+				enum pci_epc_interface_type type, u32 mw)
+{
+	struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+	struct pci_epf_bar *peer_epf_bar;
+	enum pci_barno peer_barno;
+	struct epf_ntb_ctrl *ctrl;
+	phys_addr_t phys_addr;
+	u8 func_no, vfunc_no;
+	struct pci_epc *epc;
+	u64 addr, size;
+	int ret = 0;
+
+	ntb_epc = ntb->epc[type];
+	epc = ntb_epc->epc;
+
+	peer_ntb_epc = ntb->epc[!type];
+	peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+	peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+	phys_addr = peer_epf_bar->phys_addr;
+	ctrl = ntb_epc->reg;
+	addr = ctrl->addr;
+	size = ctrl->size;
+	if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+		phys_addr += ctrl->mw1_offset;
+
+	if (size > ntb->mws_size[mw]) {
+		dev_err(&epc->dev,
+			"%s intf: MW: %d Req Sz:%llx > Supported Sz:%llx\n",
+			pci_epc_interface_string(type), mw, size,
+			ntb->mws_size[mw]);
+		ret = -EINVAL;
+		goto err_invalid_size;
+	}
+
+	func_no = ntb_epc->func_no;
+	vfunc_no = ntb_epc->vfunc_no;
+
+	ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
+	if (ret)
+		dev_err(&epc->dev,
+			"%s intf: Failed to map memory window %d address\n",
+			pci_epc_interface_string(type), mw);
+
+err_invalid_size:
+
+	return ret;
+}
+
+/**
+ * epf_ntb_teardown_mw() - Teardown the configured OB ATU
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @mw: Index of the memory window (either 0, 1, 2 or 3)
+ *
+ * Teardown the OB ATU configured in epf_ntb_configure_mw() using
+ * pci_epc_unmap_addr()
+ */
+static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
+				enum pci_epc_interface_type type, u32 mw)
+{
+	struct epf_ntb_epc *peer_ntb_epc, *ntb_epc;
+	struct pci_epf_bar *peer_epf_bar;
+	enum pci_barno peer_barno;
+	struct epf_ntb_ctrl *ctrl;
+	phys_addr_t phys_addr;
+	u8 func_no, vfunc_no;
+	struct pci_epc *epc;
+
+	ntb_epc = ntb->epc[type];
+	epc = ntb_epc->epc;
+
+	peer_ntb_epc = ntb->epc[!type];
+	peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];
+	peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
+
+	phys_addr = peer_epf_bar->phys_addr;
+	ctrl = ntb_epc->reg;
+	if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
+		phys_addr += ctrl->mw1_offset;
+	func_no = ntb_epc->func_no;
+	vfunc_no = ntb_epc->vfunc_no;
+
+	pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
+}
+
+/**
+ * epf_ntb_configure_msi() - Map OB address space to MSI address
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ * @db_count: Number of doorbell interrupts to map
+ *
+ *+-----------------+    +----->+----------------+-----------+-----------------+
+ *|       BAR0      |    |      |   Doorbell 1   +---+------->   MSI ADDRESS   |
+ *+-----------------+    |      +----------------+   |       +-----------------+
+ *|       BAR1      |    |      |   Doorbell 2   +---+       |                 |
+ *+-----------------+----+      +----------------+   |       |                 |
+ *|       BAR2      |           |   Doorbell 3   +---+       |                 |
+ *+-----------------+----+      +----------------+   |       |                 |
+ *|       BAR3      |    |      |   Doorbell 4   +---+       |                 |
+ *+-----------------+    |      |----------------+           |                 |
+ *|       BAR4      |    |      |                |           |                 |
+ *+-----------------+    |      |      MW1       |           |                 |
+ *|       BAR5      |    |      |                |           |                 |
+ *+-----------------+    +----->-----------------+           |                 |
+ *  EP CONTROLLER 1             |                |           |                 |
+ *                              |                |           |                 |
+ *                              +----------------+           +-----------------+
+ *                               (A)  EP CONTROLLER 2        |                 |
+ *                                     (OB SPACE)            |                 |
+ *                                                           |      MW1       |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                   (B)     +-----------------+
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           |                 |
+ *                                                           +-----------------+
+ *                                                           PCI Address Space
+ *                                                           (Managed by HOST2)
+
* + * + * This function performs stage (B) in the above diagram (see Doorbell 1, + * Doorbell 2, Doorbell 3, Doorbell 4) i.e map OB address space corresponding to + * doorbell to MSI address in PCI address space. + * + * This operation requires 3 parameters + * 1) Address reserved for doorbell in the outbound address space + * 2) MSI-X address in the PCIe Address space + * 3) Number of MSI-X interrupts that has to be configured + * + * The address in the outbound address space (for the Doorbell) is stored in + * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to + * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along + * with address for MW1. + * + * pci_epc_map_msi_irq() takes the MSI address from MSI capability register + * and maps the OB address (obtained in epf_ntb_alloc_peer_mem()) to the MSI + * address. + * + * epf_ntb_configure_msi() also stores the MSI data to raise each interrupt + * in db_data of the peer's control region. This helps the peer to raise + * doorbell of the other host by writing db_data to the BAR corresponding to + * BAR_DB_MW1. + */ +static int epf_ntb_configure_msi(struct epf_ntb *ntb, + enum pci_epc_interface_type type, u16 db_count) +{ + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + u32 db_entry_size, db_data, db_offset; + struct pci_epf_bar *peer_epf_bar; + struct epf_ntb_ctrl *peer_ctrl; + enum pci_barno peer_barno; + phys_addr_t phys_addr; + u8 func_no, vfunc_no; + struct pci_epc *epc; + int ret, i; + + ntb_epc = ntb->epc[type]; + epc = ntb_epc->epc; + + peer_ntb_epc = ntb->epc[!type]; + peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1]; + peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno]; + peer_ctrl = peer_ntb_epc->reg; + db_entry_size = peer_ctrl->db_entry_size; + + phys_addr = peer_epf_bar->phys_addr; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + + ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count, + db_entry_size, &db_data, &db_offset); + if (ret) { + dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n", + pci_epc_interface_string(type)); + return ret; + } + + for (i = 0; i < db_count; i++) { + peer_ctrl->db_data[i] = db_data | i; + peer_ctrl->db_offset[i] = db_offset; + } + + return 0; +} + +/** + * epf_ntb_configure_msix() - Map OB address space to MSI-X address + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * @db_count: Number of doorbell interrupts to map + * + *+-----------------+ +----->+----------------+-----------+-----------------+ + *| BAR0 | | | Doorbell 1 +-----------> MSI-X ADDRESS 1 | + *+-----------------+ | +----------------+ +-----------------+ + *| BAR1 | | | Doorbell 2 +---------+ | | + *+-----------------+----+ +----------------+ | | | + *| BAR2 | | Doorbell 3 +-------+ | +-----------------+ + *+-----------------+----+ +----------------+ | +-> MSI-X ADDRESS 2 | + *| BAR3 | | | Doorbell 4 +-----+ | +-----------------+ + *+-----------------+ | |----------------+ | | | | + *| BAR4 | | | | | | +-----------------+ + *+-----------------+ | | MW1 + | +-->+ MSI-X ADDRESS 3|| + *| BAR5 | | | | | +-----------------+ + *+-----------------+ +----->-----------------+ | | | + * EP CONTROLLER 1 | | | +-----------------+ + * | | +---->+ MSI-X ADDRESS 4 | + * +----------------+ +-----------------+ + * (A) EP CONTROLLER 2 | | + * (OB SPACE) | | + * | MW1 | + * | | + * | | + * (B) +-----------------+ + * | | + * | | + * | | + * | | + * | | + * +-----------------+ + * PCI Address Space + 
* (Managed by HOST2) + * + * This function performs stage (B) in the above diagram (see Doorbell 1, + * Doorbell 2, Doorbell 3, Doorbell 4) i.e map OB address space corresponding to + * doorbell to MSI-X address in PCI address space. + * + * This operation requires 3 parameters + * 1) Address reserved for doorbell in the outbound address space + * 2) MSI-X address in the PCIe Address space + * 3) Number of MSI-X interrupts that has to be configured + * + * The address in the outbound address space (for the Doorbell) is stored in + * epf_bar corresponding to BAR_DB_MW1 of epf_ntb_epc that is connected to + * HOST1. This is populated in epf_ntb_alloc_peer_mem() in this driver along + * with address for MW1. + * + * The MSI-X address is in the MSI-X table of EP CONTROLLER 2 and + * the count of doorbell is in ctrl->argument of epf_ntb_epc that is connected + * to HOST2. MSI-X table is stored memory mapped to ntb_epc->msix_bar and the + * offset is in ntb_epc->msix_table_offset. From this epf_ntb_configure_msix() + * gets the MSI-X address and data. + * + * epf_ntb_configure_msix() also stores the MSI-X data to raise each interrupt + * in db_data of the peer's control region. This helps the peer to raise + * doorbell of the other host by writing db_data to the BAR corresponding to + * BAR_DB_MW1. + */ +static int epf_ntb_configure_msix(struct epf_ntb *ntb, + enum pci_epc_interface_type type, + u16 db_count) +{ + const struct pci_epc_features *epc_features; + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + struct pci_epf_bar *peer_epf_bar, *epf_bar; + struct pci_epf_msix_tbl *msix_tbl; + struct epf_ntb_ctrl *peer_ctrl; + u32 db_entry_size, msg_data; + enum pci_barno peer_barno; + phys_addr_t phys_addr; + u8 func_no, vfunc_no; + struct pci_epc *epc; + size_t align; + u64 msg_addr; + int ret, i; + + ntb_epc = ntb->epc[type]; + epc = ntb_epc->epc; + + epf_bar = &ntb_epc->epf_bar[ntb_epc->msix_bar]; + msix_tbl = epf_bar->addr + ntb_epc->msix_table_offset; + + peer_ntb_epc = ntb->epc[!type]; + peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1]; + peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno]; + phys_addr = peer_epf_bar->phys_addr; + peer_ctrl = peer_ntb_epc->reg; + epc_features = ntb_epc->epc_features; + align = epc_features->align; + + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + db_entry_size = peer_ctrl->db_entry_size; + + for (i = 0; i < db_count; i++) { + msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align); + msg_data = msix_tbl[i].msg_data; + ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr, + db_entry_size); + if (ret) { + dev_err(&epc->dev, + "%s intf: Failed to configure MSI-X IRQ\n", + pci_epc_interface_string(type)); + return ret; + } + phys_addr = phys_addr + db_entry_size; + peer_ctrl->db_data[i] = msg_data; + peer_ctrl->db_offset[i] = msix_tbl[i].msg_addr & (align - 1); + } + ntb_epc->is_msix = true; + + return 0; +} + +/** + * epf_ntb_configure_db() - Configure the Outbound Address Space for one host + * to ring the doorbell of other host + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * @db_count: Count of the number of doorbells that has to be configured + * @msix: Indicates whether MSI-X or MSI should be used + * + * Invokes epf_ntb_configure_msix() or epf_ntb_configure_msi() required for + * one HOST to ring the doorbell of other HOST. 
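Once the doorbell region is configured as described above, the peer rings doorbell n with a single 32-bit write of db_data[n] at db_offset[n] inside the BAR_DB_MW1 mapping. A mock of that write, with a local buffer standing in for the mmap'd BAR and example values throughout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAX_DB_COUNT 32

/* db_data/db_offset as published in the peer's control region by
 * epf_ntb_configure_msi()/epf_ntb_configure_msix().
 */
struct db_info {
	uint32_t db_data[MAX_DB_COUNT];
	uint32_t db_offset[MAX_DB_COUNT];
};

static void ring_db(uint8_t *db_bar, const struct db_info *di, int n)
{
	uint32_t v = di->db_data[n];

	/* One posted write; the controller translates it to MSI/MSI-X. */
	memcpy(db_bar + di->db_offset[n], &v, sizeof(v));
}

int main(void)
{
	uint8_t bar[4096] = { 0 };
	struct db_info di = { .db_data = { 0x4001 }, .db_offset = { 0 } };

	ring_db(bar, &di, 0);
	printf("wrote 0x%x at offset %u\n", di.db_data[0], di.db_offset[0]);
	return 0;
}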
+ */ +static int epf_ntb_configure_db(struct epf_ntb *ntb, + enum pci_epc_interface_type type, + u16 db_count, bool msix) +{ + struct epf_ntb_epc *ntb_epc; + struct pci_epc *epc; + int ret; + + if (db_count > MAX_DB_COUNT) + return -EINVAL; + + ntb_epc = ntb->epc[type]; + epc = ntb_epc->epc; + + if (msix) + ret = epf_ntb_configure_msix(ntb, type, db_count); + else + ret = epf_ntb_configure_msi(ntb, type, db_count); + + if (ret) + dev_err(&epc->dev, "%s intf: Failed to configure DB\n", + pci_epc_interface_string(type)); + + return ret; +} + +/** + * epf_ntb_teardown_db() - Unmap address in OB address space to MSI/MSI-X + * address + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Invoke pci_epc_unmap_addr() to unmap OB address to MSI/MSI-X address. + */ +static void +epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type) +{ + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + struct pci_epf_bar *peer_epf_bar; + enum pci_barno peer_barno; + phys_addr_t phys_addr; + u8 func_no, vfunc_no; + struct pci_epc *epc; + + ntb_epc = ntb->epc[type]; + epc = ntb_epc->epc; + + peer_ntb_epc = ntb->epc[!type]; + peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_DB_MW1]; + peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno]; + phys_addr = peer_epf_bar->phys_addr; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + + pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr); +} + +/** + * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host + * @work: work_struct for the two epf_ntb_epc (PRIMARY and SECONDARY) + * + * Workqueue function that gets invoked for the two epf_ntb_epc + * periodically (once every 5ms) to see if it has received any commands + * from NTB host. The host can send commands to configure doorbell or + * configure memory window or to update link status. 
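The worker below implements a simple poll-based protocol: a non-zero ->command means "request pending"; the endpoint clears it and posts COMMAND_STATUS_OK or COMMAND_STATUS_ERROR. The host side of that handshake could look like the following sketch (a mock: the helper name is hypothetical, the struct mirrors only the leading fields of epf_ntb_ctrl, and the status is pre-set since no endpoint sits behind it):

#include <stdint.h>
#include <stdio.h>

#define COMMAND_CONFIGURE_MW	3
#define COMMAND_STATUS_OK	1
#define COMMAND_STATUS_ERROR	2

struct ntb_cmd_regs {		/* leading fields of epf_ntb_ctrl */
	uint32_t command;
	uint32_t argument;
	uint16_t command_status;
	uint16_t link_status;
	uint32_t topology;
	uint64_t addr;
	uint64_t size;
};

/* Fill the operands first, write the command last, then poll. */
static int ntb_ep_configure_mw(volatile struct ntb_cmd_regs *r,
			       uint32_t mw, uint64_t addr, uint64_t size)
{
	r->addr = addr;
	r->size = size;
	r->argument = mw;
	r->command = COMMAND_CONFIGURE_MW;

	while (r->command_status != COMMAND_STATUS_OK &&
	       r->command_status != COMMAND_STATUS_ERROR)
		;	/* real code would bound this wait */

	return r->command_status == COMMAND_STATUS_OK ? 0 : -1;
}

int main(void)
{
	/* Pre-set the status so the mock completes immediately. */
	struct ntb_cmd_regs regs = { .command_status = COMMAND_STATUS_OK };

	return ntb_ep_configure_mw(&regs, 0, 0x80000000ULL, 0x10000ULL);
}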
+ */ +static void epf_ntb_cmd_handler(struct work_struct *work) +{ + enum pci_epc_interface_type type; + struct epf_ntb_epc *ntb_epc; + struct epf_ntb_ctrl *ctrl; + u32 command, argument; + struct epf_ntb *ntb; + struct device *dev; + u16 db_count; + bool is_msix; + int ret; + + ntb_epc = container_of(work, struct epf_ntb_epc, cmd_handler.work); + ctrl = ntb_epc->reg; + command = ctrl->command; + if (!command) + goto reset_handler; + argument = ctrl->argument; + + ctrl->command = 0; + ctrl->argument = 0; + + ctrl = ntb_epc->reg; + type = ntb_epc->type; + ntb = ntb_epc->epf_ntb; + dev = &ntb->epf->dev; + + switch (command) { + case COMMAND_CONFIGURE_DOORBELL: + db_count = argument & DB_COUNT_MASK; + is_msix = argument & MSIX_ENABLE; + ret = epf_ntb_configure_db(ntb, type, db_count, is_msix); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_DOORBELL: + epf_ntb_teardown_db(ntb, type); + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_CONFIGURE_MW: + ret = epf_ntb_configure_mw(ntb, type, argument); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_MW: + epf_ntb_teardown_mw(ntb, type, argument); + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_LINK_UP: + ntb_epc->linkup = true; + if (ntb->epc[PRIMARY_INTERFACE]->linkup && + ntb->epc[SECONDARY_INTERFACE]->linkup) { + ret = epf_ntb_link_up(ntb, true); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + goto reset_handler; + } + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_LINK_DOWN: + ntb_epc->linkup = false; + ret = epf_ntb_link_up(ntb, false); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + default: + dev_err(dev, "%s intf UNKNOWN command: %d\n", + pci_epc_interface_string(type), command); + break; + } + +reset_handler: + queue_delayed_work(kpcintb_workqueue, &ntb_epc->cmd_handler, + msecs_to_jiffies(5)); +} + +/** + * epf_ntb_peer_spad_bar_clear() - Clear Peer Scratchpad BAR + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. + * + *+-----------------+------->+------------------+ +-----------------+ + *| BAR0 | | CONFIG REGION | | BAR0 | + *+-----------------+----+ +------------------+<-------+-----------------+ + *| BAR1 | | |SCRATCHPAD REGION | | BAR1 | + *+-----------------+ +-->+------------------+<-------+-----------------+ + *| BAR2 | Local Memory | BAR2 | + *+-----------------+ +-----------------+ + *| BAR3 | | BAR3 | + *+-----------------+ +-----------------+ + *| BAR4 | | BAR4 | + *+-----------------+ +-----------------+ + *| BAR5 | | BAR5 | + *+-----------------+ +-----------------+ + * EP CONTROLLER 1 EP CONTROLLER 2 + * + * Clear BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad + * region. While BAR1 is the default peer scratchpad BAR, an NTB could have + * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs). + * This function can get the exact BAR used for peer scratchpad from + * epf_ntb_bar[BAR_PEER_SPAD]. + * + * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function + * gets the address of peer scratchpad from + * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG]. 
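Since HOST2's peer scratchpad aliases HOST1's self scratchpad, a write into one side's self SPAD shows up in the other side's peer SPAD BAR. A minimal sketch of the addressing (local buffers stand in for the mapped BARs; the offset is an example value of ctrl->spad_offset, not a fixed constant):

#include <stdint.h>
#include <stdio.h>

/* Scratchpads are an array of 32-bit words located in the config
 * BAR at ->spad_offset; the peer scratchpad BAR points into the
 * other host's config region at the same offset.
 */
static uint32_t *self_spad(uint8_t *cfg_bar, uint32_t spad_offset)
{
	return (uint32_t *)(cfg_bar + spad_offset);
}

int main(void)
{
	uint8_t cfg[4096] = { 0 };
	uint32_t spad_offset = 1024;	/* example ctrl->spad_offset */
	uint32_t *spad = self_spad(cfg, spad_offset);

	spad[0] = 0xdeadbeef;	/* visible to the peer as peer spad 0 */
	printf("spad[0] = 0x%x\n", spad[0]);
	return 0;
}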
+ */ +static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + u8 func_no, vfunc_no; + struct pci_epc *epc; + + epc = ntb_epc->epc; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD]; + epf_bar = &ntb_epc->epf_bar[barno]; + pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar); +} + +/** + * epf_ntb_peer_spad_bar_set() - Set peer scratchpad BAR + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + *+-----------------+------->+------------------+ +-----------------+ + *| BAR0 | | CONFIG REGION | | BAR0 | + *+-----------------+----+ +------------------+<-------+-----------------+ + *| BAR1 | | |SCRATCHPAD REGION | | BAR1 | + *+-----------------+ +-->+------------------+<-------+-----------------+ + *| BAR2 | Local Memory | BAR2 | + *+-----------------+ +-----------------+ + *| BAR3 | | BAR3 | + *+-----------------+ +-----------------+ + *| BAR4 | | BAR4 | + *+-----------------+ +-----------------+ + *| BAR5 | | BAR5 | + *+-----------------+ +-----------------+ + * EP CONTROLLER 1 EP CONTROLLER 2 + * + * Set BAR1 of EP CONTROLLER 2 which contains the HOST2's peer scratchpad + * region. While BAR1 is the default peer scratchpad BAR, an NTB could have + * other BARs for peer scratchpad (because of 64-bit BARs or reserved BARs). + * This function can get the exact BAR used for peer scratchpad from + * epf_ntb_bar[BAR_PEER_SPAD]. + * + * Since HOST2's peer scratchpad is also HOST1's self scratchpad, this function + * gets the address of peer scratchpad from + * peer_ntb_epc->epf_ntb_bar[BAR_CONFIG]. + */ +static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + struct pci_epf_bar *peer_epf_bar, *epf_bar; + enum pci_barno peer_barno, barno; + u32 peer_spad_offset; + u8 func_no, vfunc_no; + struct pci_epc *epc; + struct device *dev; + int ret; + + dev = &ntb->epf->dev; + + peer_ntb_epc = ntb->epc[!type]; + peer_barno = peer_ntb_epc->epf_ntb_bar[BAR_CONFIG]; + peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno]; + + ntb_epc = ntb->epc[type]; + barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD]; + epf_bar = &ntb_epc->epf_bar[barno]; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + epc = ntb_epc->epc; + + peer_spad_offset = peer_ntb_epc->reg->spad_offset; + epf_bar->phys_addr = peer_epf_bar->phys_addr + peer_spad_offset; + epf_bar->size = peer_ntb_epc->spad_size; + epf_bar->barno = barno; + epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32; + + ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "%s intf: peer SPAD BAR set failed\n", + pci_epc_interface_string(type)); + return ret; + } + + return 0; +} + +/** + * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. 
+ * + * +-----------------+------->+------------------+ +-----------------+ + * | BAR0 | | CONFIG REGION | | BAR0 | + * +-----------------+----+ +------------------+<-------+-----------------+ + * | BAR1 | | |SCRATCHPAD REGION | | BAR1 | + * +-----------------+ +-->+------------------+<-------+-----------------+ + * | BAR2 | Local Memory | BAR2 | + * +-----------------+ +-----------------+ + * | BAR3 | | BAR3 | + * +-----------------+ +-----------------+ + * | BAR4 | | BAR4 | + * +-----------------+ +-----------------+ + * | BAR5 | | BAR5 | + * +-----------------+ +-----------------+ + * EP CONTROLLER 1 EP CONTROLLER 2 + * + * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region (removes inbound ATU configuration). While BAR0 is + * the default self scratchpad BAR, an NTB could have other BARs for self + * scratchpad (because of reserved BARs). This function can get the exact BAR + * used for self scratchpad from epf_ntb_bar[BAR_CONFIG]. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. Also note HOST2's peer + * scratchpad is HOST1's self scratchpad. + */ +static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + u8 func_no, vfunc_no; + struct pci_epc *epc; + + epc = ntb_epc->epc; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + barno = ntb_epc->epf_ntb_bar[BAR_CONFIG]; + epf_bar = &ntb_epc->epf_bar[barno]; + pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar); +} + +/** + * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. + * + * +-----------------+------->+------------------+ +-----------------+ + * | BAR0 | | CONFIG REGION | | BAR0 | + * +-----------------+----+ +------------------+<-------+-----------------+ + * | BAR1 | | |SCRATCHPAD REGION | | BAR1 | + * +-----------------+ +-->+------------------+<-------+-----------------+ + * | BAR2 | Local Memory | BAR2 | + * +-----------------+ +-----------------+ + * | BAR3 | | BAR3 | + * +-----------------+ +-----------------+ + * | BAR4 | | BAR4 | + * +-----------------+ +-----------------+ + * | BAR5 | | BAR5 | + * +-----------------+ +-----------------+ + * EP CONTROLLER 1 EP CONTROLLER 2 + * + * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region. While BAR0 is the default self scratchpad BAR, an + * NTB could have other BARs for self scratchpad (because of reserved BARs). + * This function can get the exact BAR used for self scratchpad from + * epf_ntb_bar[BAR_CONFIG]. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. Also note HOST2's peer + * scratchpad is HOST1's self scratchpad. 
+ */
+static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
+{
+	struct pci_epf_bar *epf_bar;
+	enum pci_barno barno;
+	u8 func_no, vfunc_no;
+	struct epf_ntb *ntb;
+	struct pci_epc *epc;
+	struct device *dev;
+	int ret;
+
+	ntb = ntb_epc->epf_ntb;
+	dev = &ntb->epf->dev;
+
+	epc = ntb_epc->epc;
+	func_no = ntb_epc->func_no;
+	vfunc_no = ntb_epc->vfunc_no;
+	barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+	epf_bar = &ntb_epc->epf_bar[barno];
+
+	ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
+	if (ret) {
+		dev_err(dev, "%s intf: Config/Status/SPAD BAR set failed\n",
+			pci_epc_interface_string(ntb_epc->type));
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * epf_ntb_config_spad_bar_free() - Free the physical memory associated with
+ * config + scratchpad region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ *
+ * +-----------------+------->+------------------+        +-----------------+
+ * |       BAR0      |        |  CONFIG REGION   |        |       BAR0      |
+ * +-----------------+----+   +------------------+<-------+-----------------+
+ * |       BAR1      |    |   |SCRATCHPAD REGION |        |       BAR1      |
+ * +-----------------+    +-->+------------------+<-------+-----------------+
+ * |       BAR2      |            Local Memory            |       BAR2      |
+ * +-----------------+                                    +-----------------+
+ * |       BAR3      |                                    |       BAR3      |
+ * +-----------------+                                    +-----------------+
+ * |       BAR4      |                                    |       BAR4      |
+ * +-----------------+                                    +-----------------+
+ * |       BAR5      |                                    |       BAR5      |
+ * +-----------------+                                    +-----------------+
+ *   EP CONTROLLER 1                                        EP CONTROLLER 2
+ *
+ * Free the Local Memory mentioned in the above diagram. After invoking this
+ * function, any of config + self scratchpad region of HOST1 or peer scratchpad
+ * region of HOST2 should not be accessed.
+ */
+static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
+{
+	enum pci_epc_interface_type type;
+	struct epf_ntb_epc *ntb_epc;
+	enum pci_barno barno;
+	struct pci_epf *epf;
+
+	epf = ntb->epf;
+	for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
+		ntb_epc = ntb->epc[type];
+		barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
+		if (ntb_epc->reg)
+			pci_epf_free_space(epf, ntb_epc->reg, barno, type);
+	}
+}
+
+/**
+ * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad
+ * region
+ * @ntb: NTB device that facilitates communication between HOST1 and HOST2
+ * @type: PRIMARY interface or SECONDARY interface
+ *
+ * +-----------------+------->+------------------+        +-----------------+
+ * |       BAR0      |        |  CONFIG REGION   |        |       BAR0      |
+ * +-----------------+----+   +------------------+<-------+-----------------+
+ * |       BAR1      |    |   |SCRATCHPAD REGION |        |       BAR1      |
+ * +-----------------+    +-->+------------------+<-------+-----------------+
+ * |       BAR2      |            Local Memory            |       BAR2      |
+ * +-----------------+                                    +-----------------+
+ * |       BAR3      |                                    |       BAR3      |
+ * +-----------------+                                    +-----------------+
+ * |       BAR4      |                                    |       BAR4      |
+ * +-----------------+                                    +-----------------+
+ * |       BAR5      |                                    |       BAR5      |
+ * +-----------------+                                    +-----------------+
+ *   EP CONTROLLER 1                                        EP CONTROLLER 2
+ *
+ * Allocate the Local Memory mentioned in the above diagram. The size of
+ * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION
+ * is obtained from "spad-count" configfs entry.
+ *
+ * The size of both config region and scratchpad region has to be aligned,
+ * since the scratchpad region will also be mapped as PEER SCRATCHPAD of
+ * other host using a separate BAR.
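A worked example of this sizing, mirroring the arithmetic in the function that follows for an MSI-only controller (no MSI-X table/PBA). The inputs are assumptions chosen for illustration: 64 scratchpads and a 4KB alignment requirement.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t align = 4096;			/* epc_features->align */
	uint32_t spad_count = 64;		/* configfs "spad_count" */
	uint64_t ctrl_size = 308;		/* sizeof(struct epf_ntb_ctrl), packed */
	uint64_t spad_size = spad_count * 4;	/* 4 bytes per scratchpad */

	ctrl_size = ALIGN_UP(ctrl_size, align);	/* -> 4096 */
	spad_size = ALIGN_UP(spad_size, align);	/* -> 4096 */

	/* spad_offset must be aligned to the SPAD size, so the
	 * control region is grown if the SPADs are bigger. */
	if (spad_size > ctrl_size)
		ctrl_size = spad_size;

	printf("spad_offset=%llu total=%llu\n",
	       (unsigned long long)ctrl_size,
	       (unsigned long long)(ctrl_size + spad_size));
	return 0;
}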
+ */ +static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + const struct pci_epc_features *peer_epc_features, *epc_features; + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + size_t msix_table_size, pba_size, align; + enum pci_barno peer_barno, barno; + struct epf_ntb_ctrl *ctrl; + u32 spad_size, ctrl_size; + u64 size, peer_size; + struct pci_epf *epf; + struct device *dev; + bool msix_capable; + u32 spad_count; + void *base; + + epf = ntb->epf; + dev = &epf->dev; + ntb_epc = ntb->epc[type]; + + epc_features = ntb_epc->epc_features; + barno = ntb_epc->epf_ntb_bar[BAR_CONFIG]; + size = epc_features->bar_fixed_size[barno]; + align = epc_features->align; + + peer_ntb_epc = ntb->epc[!type]; + peer_epc_features = peer_ntb_epc->epc_features; + peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD]; + peer_size = peer_epc_features->bar_fixed_size[peer_barno]; + + /* Check if epc_features is populated incorrectly */ + if ((!IS_ALIGNED(size, align))) + return -EINVAL; + + spad_count = ntb->spad_count; + + ctrl_size = sizeof(struct epf_ntb_ctrl); + spad_size = spad_count * 4; + + msix_capable = epc_features->msix_capable; + if (msix_capable) { + msix_table_size = PCI_MSIX_ENTRY_SIZE * ntb->db_count; + ctrl_size = ALIGN(ctrl_size, 8); + ntb_epc->msix_table_offset = ctrl_size; + ntb_epc->msix_bar = barno; + /* Align to QWORD or 8 Bytes */ + pba_size = ALIGN(DIV_ROUND_UP(ntb->db_count, 8), 8); + ctrl_size = ctrl_size + msix_table_size + pba_size; + } + + if (!align) { + ctrl_size = roundup_pow_of_two(ctrl_size); + spad_size = roundup_pow_of_two(spad_size); + } else { + ctrl_size = ALIGN(ctrl_size, align); + spad_size = ALIGN(spad_size, align); + } + + if (peer_size) { + if (peer_size < spad_size) + spad_count = peer_size / 4; + spad_size = peer_size; + } + + /* + * In order to make sure SPAD offset is aligned to its size, + * expand control region size to the size of SPAD if SPAD size + * is greater than control region size. + */ + if (spad_size > ctrl_size) + ctrl_size = spad_size; + + if (!size) + size = ctrl_size + spad_size; + else if (size < ctrl_size + spad_size) + return -EINVAL; + + base = pci_epf_alloc_space(epf, size, barno, align, type); + if (!base) { + dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n", + pci_epc_interface_string(type)); + return -ENOMEM; + } + + ntb_epc->reg = base; + + ctrl = ntb_epc->reg; + ctrl->spad_offset = ctrl_size; + ctrl->spad_count = spad_count; + ctrl->num_mws = ntb->num_mws; + ctrl->db_entry_size = align ? 
align : 4; + ntb_epc->spad_size = spad_size; + + return 0; +} + +/** + * epf_ntb_config_spad_bar_alloc_interface() - Allocate memory for config + + * scratchpad region for each of PRIMARY and SECONDARY interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Wrapper for epf_ntb_config_spad_bar_alloc() which allocates memory for + * config + scratchpad region for a specific interface + */ +static int epf_ntb_config_spad_bar_alloc_interface(struct epf_ntb *ntb) +{ + enum pci_epc_interface_type type; + struct device *dev; + int ret; + + dev = &ntb->epf->dev; + + for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) { + ret = epf_ntb_config_spad_bar_alloc(ntb, type); + if (ret) { + dev_err(dev, "%s intf: Config/SPAD BAR alloc failed\n", + pci_epc_interface_string(type)); + return ret; + } + } + + return 0; +} + +/** + * epf_ntb_free_peer_mem() - Free memory allocated in peers outbound address + * space + * @ntb_epc: EPC associated with one of the HOST which holds peers outbound + * address regions + * + * +-----------------+ +---->+----------------+-----------+-----------------+ + * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 | + * +-----------------+ | +----------------+ +-----------------+ + * | BAR1 | | | Doorbell 2 +---------+ | | + * +-----------------+----+ +----------------+ | | | + * | BAR2 | | Doorbell 3 +-------+ | +-----------------+ + * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 | + * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+ + * +-----------------+ | |----------------+ | | | | + * | BAR4 | | | | | | +-----------------+ + * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3|| + * | BAR5 | | | | | | +-----------------+ + * +-----------------+ +---->-----------------+ | | | | + * EP CONTROLLER 1 | | | | +-----------------+ + * | | | +---->+ MSI|X ADDRESS 4 | + * +----------------+ | +-----------------+ + * (A) EP CONTROLLER 2 | | | + * (OB SPACE) | | | + * +-------> MW1 | + * | | + * | | + * (B) +-----------------+ + * | | + * | | + * | | + * | | + * | | + * +-----------------+ + * PCI Address Space + * (Managed by HOST2) + * + * Free memory allocated in EP CONTROLLER 2 (OB SPACE) in the above diagram. + * It'll free Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3, + * MW4). 
+ */ +static void epf_ntb_free_peer_mem(struct epf_ntb_epc *ntb_epc) +{ + struct pci_epf_bar *epf_bar; + void __iomem *mw_addr; + phys_addr_t phys_addr; + enum epf_ntb_bar bar; + enum pci_barno barno; + struct pci_epc *epc; + size_t size; + + epc = ntb_epc->epc; + + for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) { + barno = ntb_epc->epf_ntb_bar[bar]; + mw_addr = ntb_epc->mw_addr[barno]; + epf_bar = &ntb_epc->epf_bar[barno]; + phys_addr = epf_bar->phys_addr; + size = epf_bar->size; + if (mw_addr) { + pci_epc_mem_free_addr(epc, phys_addr, mw_addr, size); + ntb_epc->mw_addr[barno] = NULL; + } + } +} + +/** + * epf_ntb_db_mw_bar_clear() - Clear doorbell and memory BAR + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address + * + * +-----------------+ +---->+----------------+-----------+-----------------+ + * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 | + * +-----------------+ | +----------------+ +-----------------+ + * | BAR1 | | | Doorbell 2 +---------+ | | + * +-----------------+----+ +----------------+ | | | + * | BAR2 | | Doorbell 3 +-------+ | +-----------------+ + * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 | + * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+ + * +-----------------+ | |----------------+ | | | | + * | BAR4 | | | | | | +-----------------+ + * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3|| + * | BAR5 | | | | | | +-----------------+ + * +-----------------+ +---->-----------------+ | | | | + * EP CONTROLLER 1 | | | | +-----------------+ + * | | | +---->+ MSI|X ADDRESS 4 | + * +----------------+ | +-----------------+ + * (A) EP CONTROLLER 2 | | | + * (OB SPACE) | | | + * +-------> MW1 | + * | | + * | | + * (B) +-----------------+ + * | | + * | | + * | | + * | | + * | | + * +-----------------+ + * PCI Address Space + * (Managed by HOST2) + * + * Clear doorbell and memory BARs (remove inbound ATU configuration). In the above + * diagram it clears BAR2 TO BAR5 of EP CONTROLLER 1 (Doorbell BAR, MW1 BAR, MW2 + * BAR, MW3 BAR and MW4 BAR). + */ +static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc) +{ + struct pci_epf_bar *epf_bar; + enum epf_ntb_bar bar; + enum pci_barno barno; + u8 func_no, vfunc_no; + struct pci_epc *epc; + + epc = ntb_epc->epc; + + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + + for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) { + barno = ntb_epc->epf_ntb_bar[bar]; + epf_bar = &ntb_epc->epf_bar[barno]; + pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar); + } +} + +/** + * epf_ntb_db_mw_bar_cleanup() - Clear doorbell/memory BAR and free memory + * allocated in peers outbound address space + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Wrapper for epf_ntb_db_mw_bar_clear() to clear HOST1's BAR and + * epf_ntb_free_peer_mem() which frees up HOST2 outbound memory. 
+ */ +static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + + ntb_epc = ntb->epc[type]; + peer_ntb_epc = ntb->epc[!type]; + + epf_ntb_db_mw_bar_clear(ntb_epc); + epf_ntb_free_peer_mem(peer_ntb_epc); +} + +/** + * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Configure MSI/MSI-X capability for each interface with number of + * interrupts equal to "db_count" configfs entry. + */ +static int epf_ntb_configure_interrupt(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + const struct pci_epc_features *epc_features; + bool msix_capable, msi_capable; + struct epf_ntb_epc *ntb_epc; + u8 func_no, vfunc_no; + struct pci_epc *epc; + struct device *dev; + u32 db_count; + int ret; + + ntb_epc = ntb->epc[type]; + dev = &ntb->epf->dev; + + epc_features = ntb_epc->epc_features; + msix_capable = epc_features->msix_capable; + msi_capable = epc_features->msi_capable; + + if (!(msix_capable || msi_capable)) { + dev_err(dev, "MSI or MSI-X is required for doorbell\n"); + return -EINVAL; + } + + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + + db_count = ntb->db_count; + if (db_count > MAX_DB_COUNT) { + dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT); + return -EINVAL; + } + + ntb->db_count = db_count; + epc = ntb_epc->epc; + + if (msi_capable) { + ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count); + if (ret) { + dev_err(dev, "%s intf: MSI configuration failed\n", + pci_epc_interface_string(type)); + return ret; + } + } + + if (msix_capable) { + ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count, + ntb_epc->msix_bar, + ntb_epc->msix_table_offset); + if (ret) { + dev_err(dev, "MSI configuration failed\n"); + return ret; + } + } + + return 0; +} + +/** + * epf_ntb_alloc_peer_mem() - Allocate memory in peer's outbound address space + * @dev: The PCI device. 
+ * @ntb_epc: EPC associated with one of the HOST whose BAR holds peer's outbound + * address + * @bar: BAR of @ntb_epc in for which memory has to be allocated (could be + * BAR_DB_MW1, BAR_MW2, BAR_MW3, BAR_MW4) + * @peer_ntb_epc: EPC associated with HOST whose outbound address space is + * used by @ntb_epc + * @size: Size of the address region that has to be allocated in peers OB SPACE + * + * + * +-----------------+ +---->+----------------+-----------+-----------------+ + * | BAR0 | | | Doorbell 1 +-----------> MSI|X ADDRESS 1 | + * +-----------------+ | +----------------+ +-----------------+ + * | BAR1 | | | Doorbell 2 +---------+ | | + * +-----------------+----+ +----------------+ | | | + * | BAR2 | | Doorbell 3 +-------+ | +-----------------+ + * +-----------------+----+ +----------------+ | +-> MSI|X ADDRESS 2 | + * | BAR3 | | | Doorbell 4 +-----+ | +-----------------+ + * +-----------------+ | |----------------+ | | | | + * | BAR4 | | | | | | +-----------------+ + * +-----------------+ | | MW1 +---+ | +-->+ MSI|X ADDRESS 3|| + * | BAR5 | | | | | | +-----------------+ + * +-----------------+ +---->-----------------+ | | | | + * EP CONTROLLER 1 | | | | +-----------------+ + * | | | +---->+ MSI|X ADDRESS 4 | + * +----------------+ | +-----------------+ + * (A) EP CONTROLLER 2 | | | + * (OB SPACE) | | | + * +-------> MW1 | + * | | + * | | + * (B) +-----------------+ + * | | + * | | + * | | + * | | + * | | + * +-----------------+ + * PCI Address Space + * (Managed by HOST2) + * + * Allocate memory in OB space of EP CONTROLLER 2 in the above diagram. Allocate + * for Doorbell 1, Doorbell 2, Doorbell 3, Doorbell 4, MW1 (and MW2, MW3, MW4). + */ +static int epf_ntb_alloc_peer_mem(struct device *dev, + struct epf_ntb_epc *ntb_epc, + enum epf_ntb_bar bar, + struct epf_ntb_epc *peer_ntb_epc, + size_t size) +{ + const struct pci_epc_features *epc_features; + struct pci_epf_bar *epf_bar; + struct pci_epc *peer_epc; + phys_addr_t phys_addr; + void __iomem *mw_addr; + enum pci_barno barno; + size_t align; + + epc_features = ntb_epc->epc_features; + align = epc_features->align; + + if (size < 128) + size = 128; + + if (align) + size = ALIGN(size, align); + else + size = roundup_pow_of_two(size); + + peer_epc = peer_ntb_epc->epc; + mw_addr = pci_epc_mem_alloc_addr(peer_epc, &phys_addr, size); + if (!mw_addr) { + dev_err(dev, "%s intf: Failed to allocate OB address\n", + pci_epc_interface_string(peer_ntb_epc->type)); + return -ENOMEM; + } + + barno = ntb_epc->epf_ntb_bar[bar]; + epf_bar = &ntb_epc->epf_bar[barno]; + ntb_epc->mw_addr[barno] = mw_addr; + + epf_bar->phys_addr = phys_addr; + epf_bar->size = size; + epf_bar->barno = barno; + epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32; + + return 0; +} + +/** + * epf_ntb_db_mw_bar_init() - Configure Doorbell and Memory window BARs + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Wrapper for epf_ntb_alloc_peer_mem() and pci_epc_set_bar() that allocates + * memory in OB address space of HOST2 and configures BAR of HOST1 + */ +static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + const struct pci_epc_features *epc_features; + struct epf_ntb_epc *peer_ntb_epc, *ntb_epc; + struct pci_epf_bar *epf_bar; + struct epf_ntb_ctrl *ctrl; + u32 num_mws, db_count; + enum epf_ntb_bar bar; + enum pci_barno barno; + u8 func_no, vfunc_no; + struct pci_epc *epc; + struct device *dev; + size_t align; + int ret, i; + u64 size; + + ntb_epc = 
ntb->epc[type]; + peer_ntb_epc = ntb->epc[!type]; + + dev = &ntb->epf->dev; + epc_features = ntb_epc->epc_features; + align = epc_features->align; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + epc = ntb_epc->epc; + num_mws = ntb->num_mws; + db_count = ntb->db_count; + + for (bar = BAR_DB_MW1, i = 0; i < num_mws; bar++, i++) { + if (bar == BAR_DB_MW1) { + align = align ? align : 4; + size = db_count * align; + size = ALIGN(size, ntb->mws_size[i]); + ctrl = ntb_epc->reg; + ctrl->mw1_offset = size; + size += ntb->mws_size[i]; + } else { + size = ntb->mws_size[i]; + } + + ret = epf_ntb_alloc_peer_mem(dev, ntb_epc, bar, + peer_ntb_epc, size); + if (ret) { + dev_err(dev, "%s intf: DoorBell mem alloc failed\n", + pci_epc_interface_string(type)); + goto err_alloc_peer_mem; + } + + barno = ntb_epc->epf_ntb_bar[bar]; + epf_bar = &ntb_epc->epf_bar[barno]; + + ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "%s intf: DoorBell BAR set failed\n", + pci_epc_interface_string(type)); + goto err_alloc_peer_mem; + } + } + + return 0; + +err_alloc_peer_mem: + epf_ntb_db_mw_bar_cleanup(ntb, type); + + return ret; +} + +/** + * epf_ntb_epc_destroy_interface() - Cleanup NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Unbind NTB function device from EPC and relinquish reference to pci_epc + * for each of the interface. + */ +static void epf_ntb_epc_destroy_interface(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + struct epf_ntb_epc *ntb_epc; + struct pci_epc *epc; + struct pci_epf *epf; + + if (type < 0) + return; + + epf = ntb->epf; + ntb_epc = ntb->epc[type]; + if (!ntb_epc) + return; + epc = ntb_epc->epc; + pci_epc_remove_epf(epc, epf, type); + pci_epc_put(epc); +} + +/** + * epf_ntb_epc_destroy() - Cleanup NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces + */ +static void epf_ntb_epc_destroy(struct epf_ntb *ntb) +{ + enum pci_epc_interface_type type; + + for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) + epf_ntb_epc_destroy_interface(ntb, type); +} + +/** + * epf_ntb_epc_create_interface() - Create and initialize NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @epc: struct pci_epc to which a particular NTB interface should be associated + * @type: PRIMARY interface or SECONDARY interface + * + * Allocate memory for NTB EPC interface and initialize it. 
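The BAR_DB_MW1 sizing in epf_ntb_db_mw_bar_init() above places the doorbell entries first and MW1 behind them, recording the boundary in ctrl->mw1_offset. A worked example of that arithmetic, with assumed inputs (4 doorbells, 4KB alignment, a 1MB MW1):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t align = 4096;		/* epc_features->align (0 means 4) */
	uint32_t db_count = 4;
	uint64_t mw1_size = 1 << 20;

	uint64_t size = db_count * align;	/* doorbell entries */
	size = ALIGN_UP(size, mw1_size);	/* MW1 starts aligned... */
	uint64_t mw1_offset = size;		/* -> ctrl->mw1_offset */
	size += mw1_size;			/* ...behind the doorbells */

	printf("mw1_offset=0x%llx bar_size=0x%llx\n",
	       (unsigned long long)mw1_offset,
	       (unsigned long long)size);
	return 0;
}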
+ */ +static int epf_ntb_epc_create_interface(struct epf_ntb *ntb, + struct pci_epc *epc, + enum pci_epc_interface_type type) +{ + const struct pci_epc_features *epc_features; + struct pci_epf_bar *epf_bar; + struct epf_ntb_epc *ntb_epc; + u8 func_no, vfunc_no; + struct pci_epf *epf; + struct device *dev; + + dev = &ntb->epf->dev; + + ntb_epc = devm_kzalloc(dev, sizeof(*ntb_epc), GFP_KERNEL); + if (!ntb_epc) + return -ENOMEM; + + epf = ntb->epf; + vfunc_no = epf->vfunc_no; + if (type == PRIMARY_INTERFACE) { + func_no = epf->func_no; + epf_bar = epf->bar; + } else { + func_no = epf->sec_epc_func_no; + epf_bar = epf->sec_epc_bar; + } + + ntb_epc->linkup = false; + ntb_epc->epc = epc; + ntb_epc->func_no = func_no; + ntb_epc->vfunc_no = vfunc_no; + ntb_epc->type = type; + ntb_epc->epf_bar = epf_bar; + ntb_epc->epf_ntb = ntb; + + epc_features = pci_epc_get_features(epc, func_no, vfunc_no); + if (!epc_features) + return -EINVAL; + ntb_epc->epc_features = epc_features; + + ntb->epc[type] = ntb_epc; + + return 0; +} + +/** + * epf_ntb_epc_create() - Create and initialize NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Get a reference to EPC device and bind NTB function device to that EPC + * for each of the interface. It is also a wrapper to + * epf_ntb_epc_create_interface() to allocate memory for NTB EPC interface + * and initialize it + */ +static int epf_ntb_epc_create(struct epf_ntb *ntb) +{ + struct pci_epf *epf; + struct device *dev; + int ret; + + epf = ntb->epf; + dev = &epf->dev; + + ret = epf_ntb_epc_create_interface(ntb, epf->epc, PRIMARY_INTERFACE); + if (ret) { + dev_err(dev, "PRIMARY intf: Fail to create NTB EPC\n"); + return ret; + } + + ret = epf_ntb_epc_create_interface(ntb, epf->sec_epc, + SECONDARY_INTERFACE); + if (ret) { + dev_err(dev, "SECONDARY intf: Fail to create NTB EPC\n"); + goto err_epc_create; + } + + return 0; + +err_epc_create: + epf_ntb_epc_destroy_interface(ntb, PRIMARY_INTERFACE); + + return ret; +} + +/** + * epf_ntb_init_epc_bar_interface() - Identify BARs to be used for each of + * the NTB constructs (scratchpad region, doorbell, memorywindow) + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Identify the free BARs to be used for each of BAR_CONFIG, BAR_PEER_SPAD, + * BAR_DB_MW1, BAR_MW2, BAR_MW3 and BAR_MW4. 
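A toy model of that BAR assignment: walk the six BARs, skip any the controller reserves, and hand them out in order to CONFIG, PEER_SPAD, DB_MW1, then as many memory windows as still fit (the driver trims num_mws when BARs run out). The reserved bitmask is hypothetical, standing in for what pci_epc_get_next_free_bar() derives from epc_features.

#include <stdio.h>

int main(void)
{
	const char *use[] = { "CONFIG", "PEER_SPAD", "DB_MW1",
			      "MW2", "MW3", "MW4" };
	unsigned int reserved = 1 << 1;	/* example: BAR1 unusable */
	int bar = 0;

	for (int i = 0; i < 6; i++) {
		while (bar < 6 && (reserved & (1 << bar)))
			bar++;		/* skip reserved BARs */
		if (bar == 6) {
			printf("%s: no BAR left\n", use[i]);
			continue;	/* optional MWs are dropped */
		}
		printf("%s -> BAR%d\n", use[i], bar++);
	}
	return 0;
}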
+ */ +static int epf_ntb_init_epc_bar_interface(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + const struct pci_epc_features *epc_features; + struct epf_ntb_epc *ntb_epc; + enum pci_barno barno; + enum epf_ntb_bar bar; + struct device *dev; + u32 num_mws; + int i; + + barno = BAR_0; + ntb_epc = ntb->epc[type]; + num_mws = ntb->num_mws; + dev = &ntb->epf->dev; + epc_features = ntb_epc->epc_features; + + /* These are required BARs which are mandatory for NTB functionality */ + for (bar = BAR_CONFIG; bar <= BAR_DB_MW1; bar++, barno++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + dev_err(dev, "%s intf: Fail to get NTB function BAR\n", + pci_epc_interface_string(type)); + return barno; + } + ntb_epc->epf_ntb_bar[bar] = barno; + } + + /* These are optional BARs which don't impact NTB functionality */ + for (bar = BAR_MW2, i = 1; i < num_mws; bar++, barno++, i++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + ntb->num_mws = i; + dev_dbg(dev, "BAR not available for > MW%d\n", i + 1); + } + ntb_epc->epf_ntb_bar[bar] = barno; + } + + return 0; +} + +/** + * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB + * constructs (scratchpad region, doorbell, memorywindow) + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Wrapper to epf_ntb_init_epc_bar_interface() to identify the free BARs + * to be used for each of BAR_CONFIG, BAR_PEER_SPAD, BAR_DB_MW1, BAR_MW2, + * BAR_MW3 and BAR_MW4 for all the interfaces. + */ +static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) +{ + enum pci_epc_interface_type type; + struct device *dev; + int ret; + + dev = &ntb->epf->dev; + for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) { + ret = epf_ntb_init_epc_bar_interface(ntb, type); + if (ret) { + dev_err(dev, "Fail to init EPC bar for %s interface\n", + pci_epc_interface_string(type)); + return ret; + } + } + + return 0; +} + +/** + * epf_ntb_epc_init_interface() - Initialize NTB interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Wrapper to initialize a particular EPC interface and start the workqueue + * to check for commands from host. This function will write to the + * EP controller HW for configuring it. 
+ */ +static int epf_ntb_epc_init_interface(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + struct epf_ntb_epc *ntb_epc; + u8 func_no, vfunc_no; + struct pci_epc *epc; + struct pci_epf *epf; + struct device *dev; + int ret; + + ntb_epc = ntb->epc[type]; + epf = ntb->epf; + dev = &epf->dev; + epc = ntb_epc->epc; + func_no = ntb_epc->func_no; + vfunc_no = ntb_epc->vfunc_no; + + ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]); + if (ret) { + dev_err(dev, "%s intf: Config/self SPAD BAR init failed\n", + pci_epc_interface_string(type)); + return ret; + } + + ret = epf_ntb_peer_spad_bar_set(ntb, type); + if (ret) { + dev_err(dev, "%s intf: Peer SPAD BAR init failed\n", + pci_epc_interface_string(type)); + goto err_peer_spad_bar_init; + } + + ret = epf_ntb_configure_interrupt(ntb, type); + if (ret) { + dev_err(dev, "%s intf: Interrupt configuration failed\n", + pci_epc_interface_string(type)); + goto err_peer_spad_bar_init; + } + + ret = epf_ntb_db_mw_bar_init(ntb, type); + if (ret) { + dev_err(dev, "%s intf: DB/MW BAR init failed\n", + pci_epc_interface_string(type)); + goto err_db_mw_bar_init; + } + + if (vfunc_no <= 1) { + ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header); + if (ret) { + dev_err(dev, "%s intf: Configuration header write failed\n", + pci_epc_interface_string(type)); + goto err_write_header; + } + } + + INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler); + queue_work(kpcintb_workqueue, &ntb->epc[type]->cmd_handler.work); + + return 0; + +err_write_header: + epf_ntb_db_mw_bar_cleanup(ntb, type); + +err_db_mw_bar_init: + epf_ntb_peer_spad_bar_clear(ntb->epc[type]); + +err_peer_spad_bar_init: + epf_ntb_config_sspad_bar_clear(ntb->epc[type]); + + return ret; +} + +/** + * epf_ntb_epc_cleanup_interface() - Cleanup NTB interface + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * @type: PRIMARY interface or SECONDARY interface + * + * Wrapper to cleanup a particular NTB interface. + */ +static void epf_ntb_epc_cleanup_interface(struct epf_ntb *ntb, + enum pci_epc_interface_type type) +{ + struct epf_ntb_epc *ntb_epc; + + if (type < 0) + return; + + ntb_epc = ntb->epc[type]; + cancel_delayed_work(&ntb_epc->cmd_handler); + epf_ntb_db_mw_bar_cleanup(ntb, type); + epf_ntb_peer_spad_bar_clear(ntb_epc); + epf_ntb_config_sspad_bar_clear(ntb_epc); +} + +/** + * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Wrapper to cleanup all NTB interfaces. + */ +static void epf_ntb_epc_cleanup(struct epf_ntb *ntb) +{ + enum pci_epc_interface_type type; + + for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) + epf_ntb_epc_cleanup_interface(ntb, type); +} + +/** + * epf_ntb_epc_init() - Initialize all NTB interfaces + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Wrapper to initialize all NTB interface and start the workqueue + * to check for commands from host. 
+ */ +static int epf_ntb_epc_init(struct epf_ntb *ntb) +{ + enum pci_epc_interface_type type; + struct device *dev; + int ret; + + dev = &ntb->epf->dev; + + for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) { + ret = epf_ntb_epc_init_interface(ntb, type); + if (ret) { + dev_err(dev, "%s intf: Failed to initialize\n", + pci_epc_interface_string(type)); + goto err_init_type; + } + } + + return 0; + +err_init_type: + epf_ntb_epc_cleanup_interface(ntb, type - 1); + + return ret; +} + +/** + * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality + * @epf: NTB endpoint function device + * + * Initialize both the endpoint controllers associated with NTB function device. + * Invoked when a primary interface or secondary interface is bound to EPC + * device. This function will succeed only when EPC is bound to both the + * interfaces. + */ +static int epf_ntb_bind(struct pci_epf *epf) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + struct device *dev = &epf->dev; + int ret; + + if (!epf->epc) { + dev_dbg(dev, "PRIMARY EPC interface not yet bound\n"); + return 0; + } + + if (!epf->sec_epc) { + dev_dbg(dev, "SECONDARY EPC interface not yet bound\n"); + return 0; + } + + ret = epf_ntb_epc_create(ntb); + if (ret) { + dev_err(dev, "Failed to create NTB EPC\n"); + return ret; + } + + ret = epf_ntb_init_epc_bar(ntb); + if (ret) { + dev_err(dev, "Failed to create NTB EPC\n"); + goto err_bar_init; + } + + ret = epf_ntb_config_spad_bar_alloc_interface(ntb); + if (ret) { + dev_err(dev, "Failed to allocate BAR memory\n"); + goto err_bar_alloc; + } + + ret = epf_ntb_epc_init(ntb); + if (ret) { + dev_err(dev, "Failed to initialize EPC\n"); + goto err_bar_alloc; + } + + epf_set_drvdata(epf, ntb); + + return 0; + +err_bar_alloc: + epf_ntb_config_spad_bar_free(ntb); + +err_bar_init: + epf_ntb_epc_destroy(ntb); + + return ret; +} + +/** + * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind() + * @epf: NTB endpoint function device + * + * Cleanup the initialization from epf_ntb_bind() + */ +static void epf_ntb_unbind(struct pci_epf *epf) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + + epf_ntb_epc_cleanup(ntb); + epf_ntb_config_spad_bar_free(ntb); + epf_ntb_epc_destroy(ntb); +} + +#define EPF_NTB_R(_name) \ +static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + \ + return sysfs_emit(page, "%d\n", ntb->_name); \ +} + +#define EPF_NTB_W(_name) \ +static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + u32 val; \ + \ + if (kstrtou32(page, 0, &val) < 0) \ + return -EINVAL; \ + \ + ntb->_name = val; \ + \ + return len; \ +} + +#define EPF_NTB_MW_R(_name) \ +static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + int win_no; \ + \ + sscanf(#_name, "mw%d", &win_no); \ + \ + return sysfs_emit(page, "%lld\n", ntb->mws_size[win_no - 1]); \ +} + +#define EPF_NTB_MW_W(_name) \ +static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + struct device *dev = &ntb->epf->dev; \ + int win_no; \ + u64 val; \ + 
\
+	if (kstrtou64(page, 0, &val) < 0)				\
+		return -EINVAL;						\
+									\
+	if (sscanf(#_name, "mw%d", &win_no) != 1)			\
+		return -EINVAL;						\
+									\
+	if (ntb->num_mws < win_no) {					\
+		dev_err(dev, "Invalid num_mws: %d value\n", ntb->num_mws); \
+		return -EINVAL;						\
+	}								\
+									\
+	ntb->mws_size[win_no - 1] = val;				\
+									\
+	return len;							\
+}
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+				     const char *page, size_t len)
+{
+	struct config_group *group = to_config_group(item);
+	struct epf_ntb *ntb = to_epf_ntb(group);
+	u32 val;
+
+	if (kstrtou32(page, 0, &val) < 0)
+		return -EINVAL;
+
+	if (val > MAX_MW)
+		return -EINVAL;
+
+	ntb->num_mws = val;
+
+	return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+	&epf_ntb_attr_spad_count,
+	&epf_ntb_attr_db_count,
+	&epf_ntb_attr_num_mws,
+	&epf_ntb_attr_mw1,
+	&epf_ntb_attr_mw2,
+	&epf_ntb_attr_mw3,
+	&epf_ntb_attr_mw4,
+	NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+	.ct_attrs	= epf_ntb_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ *	   config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add configfs directory specific to NTB. This directory will hold
+ * NTB-specific properties like db_count, spad_count, num_mws etc.
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+					    struct config_group *group)
+{
+	struct epf_ntb *ntb = epf_get_drvdata(epf);
+	struct config_group *ntb_group = &ntb->group;
+	struct device *dev = &epf->dev;
+
+	config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+	return ntb_group;
+}
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when endpoint function bus detects an NTB
+ * endpoint function.
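+ *
+ * Returns: Zero for success, or -ENOMEM if memory for the driver data
+ * cannot be allocated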
+ */ +static int epf_ntb_probe(struct pci_epf *epf) +{ + struct epf_ntb *ntb; + struct device *dev; + + dev = &epf->dev; + + ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL); + if (!ntb) + return -ENOMEM; + + epf->header = &epf_ntb_header; + ntb->epf = epf; + epf_set_drvdata(epf, ntb); + + return 0; +} + +static struct pci_epf_ops epf_ntb_ops = { + .bind = epf_ntb_bind, + .unbind = epf_ntb_unbind, + .add_cfs = epf_ntb_add_cfs, +}; + +static const struct pci_epf_device_id epf_ntb_ids[] = { + { + .name = "pci_epf_ntb", + }, + {}, +}; + +static struct pci_epf_driver epf_ntb_driver = { + .driver.name = "pci_epf_ntb", + .probe = epf_ntb_probe, + .id_table = epf_ntb_ids, + .ops = &epf_ntb_ops, + .owner = THIS_MODULE, +}; + +static int __init epf_ntb_init(void) +{ + int ret; + + kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM | + WQ_HIGHPRI, 0); + ret = pci_epf_register_driver(&epf_ntb_driver); + if (ret) { + destroy_workqueue(kpcintb_workqueue); + pr_err("Failed to register pci epf ntb driver --> %d\n", ret); + return ret; + } + + return 0; +} +module_init(epf_ntb_init); + +static void __exit epf_ntb_exit(void) +{ + pci_epf_unregister_driver(&epf_ntb_driver); + destroy_workqueue(kpcintb_workqueue); +} +module_exit(epf_ntb_exit); + +MODULE_DESCRIPTION("PCI EPF NTB DRIVER"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c new file mode 100644 index 000000000..2e8a4de2a --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -0,0 +1,1078 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test driver to test endpoint functionality + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci_ids.h> +#include <linux/random.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci_regs.h> + +#define IRQ_TYPE_LEGACY 0 +#define IRQ_TYPE_MSI 1 +#define IRQ_TYPE_MSIX 2 + +#define COMMAND_RAISE_LEGACY_IRQ BIT(0) +#define COMMAND_RAISE_MSI_IRQ BIT(1) +#define COMMAND_RAISE_MSIX_IRQ BIT(2) +#define COMMAND_READ BIT(3) +#define COMMAND_WRITE BIT(4) +#define COMMAND_COPY BIT(5) + +#define STATUS_READ_SUCCESS BIT(0) +#define STATUS_READ_FAIL BIT(1) +#define STATUS_WRITE_SUCCESS BIT(2) +#define STATUS_WRITE_FAIL BIT(3) +#define STATUS_COPY_SUCCESS BIT(4) +#define STATUS_COPY_FAIL BIT(5) +#define STATUS_IRQ_RAISED BIT(6) +#define STATUS_SRC_ADDR_INVALID BIT(7) +#define STATUS_DST_ADDR_INVALID BIT(8) + +#define FLAG_USE_DMA BIT(0) + +#define TIMER_RESOLUTION 1 + +static struct workqueue_struct *kpcitest_workqueue; + +struct pci_epf_test { + void *reg[PCI_STD_NUM_BARS]; + struct pci_epf *epf; + enum pci_barno test_reg_bar; + size_t msix_table_offset; + struct delayed_work cmd_handler; + struct dma_chan *dma_chan_tx; + struct dma_chan *dma_chan_rx; + struct dma_chan *transfer_chan; + dma_cookie_t transfer_cookie; + enum dma_status transfer_status; + struct completion transfer_complete; + bool dma_supported; + bool dma_private; + const struct pci_epc_features *epc_features; +}; + +struct pci_epf_test_reg { + u32 magic; + u32 command; + u32 status; + u64 src_addr; + u64 dst_addr; + u32 size; + u32 checksum; + u32 irq_type; + u32 irq_number; + u32 flags; +} __packed; + +static struct pci_epf_header test_header = { 
+ .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .baseclass_code = PCI_CLASS_OTHERS, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; + +static void pci_epf_test_dma_callback(void *param) +{ + struct pci_epf_test *epf_test = param; + struct dma_tx_state state; + + epf_test->transfer_status = + dmaengine_tx_status(epf_test->transfer_chan, + epf_test->transfer_cookie, &state); + if (epf_test->transfer_status == DMA_COMPLETE || + epf_test->transfer_status == DMA_ERROR) + complete(&epf_test->transfer_complete); +} + +/** + * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer + * data between PCIe EP and remote PCIe RC + * @epf_test: the EPF test device that performs the data transfer operation + * @dma_dst: The destination address of the data transfer. It can be a physical + * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. + * @dma_src: The source address of the data transfer. It can be a physical + * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. + * @len: The size of the data transfer + * @dma_remote: remote RC physical address + * @dir: DMA transfer direction + * + * Function that uses dmaengine API to transfer data between PCIe EP and remote + * PCIe RC. The source and destination address can be a physical address given + * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs. + * + * The function returns '0' on success and negative value on failure. + */ +static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test, + dma_addr_t dma_dst, dma_addr_t dma_src, + size_t len, dma_addr_t dma_remote, + enum dma_transfer_direction dir) +{ + struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ? + epf_test->dma_chan_tx : epf_test->dma_chan_rx; + dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? 
dma_src : dma_dst; + enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + struct pci_epf *epf = epf_test->epf; + struct dma_async_tx_descriptor *tx; + struct dma_slave_config sconf = {}; + struct device *dev = &epf->dev; + int ret; + + if (IS_ERR_OR_NULL(chan)) { + dev_err(dev, "Invalid DMA memcpy channel\n"); + return -EINVAL; + } + + if (epf_test->dma_private) { + sconf.direction = dir; + if (dir == DMA_MEM_TO_DEV) + sconf.dst_addr = dma_remote; + else + sconf.src_addr = dma_remote; + + if (dmaengine_slave_config(chan, &sconf)) { + dev_err(dev, "DMA slave config fail\n"); + return -EIO; + } + tx = dmaengine_prep_slave_single(chan, dma_local, len, dir, + flags); + } else { + tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, + flags); + } + + if (!tx) { + dev_err(dev, "Failed to prepare DMA memcpy\n"); + return -EIO; + } + + reinit_completion(&epf_test->transfer_complete); + epf_test->transfer_chan = chan; + tx->callback = pci_epf_test_dma_callback; + tx->callback_param = epf_test; + epf_test->transfer_cookie = tx->tx_submit(tx); + + ret = dma_submit_error(epf_test->transfer_cookie); + if (ret) { + dev_err(dev, "Failed to do DMA tx_submit %d\n", ret); + goto terminate; + } + + dma_async_issue_pending(chan); + ret = wait_for_completion_interruptible(&epf_test->transfer_complete); + if (ret < 0) { + dev_err(dev, "DMA wait_for_completion interrupted\n"); + goto terminate; + } + + if (epf_test->transfer_status == DMA_ERROR) { + dev_err(dev, "DMA transfer failed\n"); + ret = -EIO; + } + +terminate: + dmaengine_terminate_sync(chan); + + return ret; +} + +struct epf_dma_filter { + struct device *dev; + u32 dma_mask; +}; + +static bool epf_dma_filter_fn(struct dma_chan *chan, void *node) +{ + struct epf_dma_filter *filter = node; + struct dma_slave_caps caps; + + memset(&caps, 0, sizeof(caps)); + dma_get_slave_caps(chan, &caps); + + return chan->device->dev == filter->dev + && (filter->dma_mask & caps.directions); +} + +/** + * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel + * @epf_test: the EPF test device that performs data transfer operation + * + * Function to initialize EPF test DMA channel. + */ +static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) +{ + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct epf_dma_filter filter; + struct dma_chan *dma_chan; + dma_cap_mask_t mask; + int ret; + + filter.dev = epf->epc->dev.parent; + filter.dma_mask = BIT(DMA_DEV_TO_MEM); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); + if (!dma_chan) { + dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n"); + goto fail_back_tx; + } + + epf_test->dma_chan_rx = dma_chan; + + filter.dma_mask = BIT(DMA_MEM_TO_DEV); + dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); + + if (!dma_chan) { + dev_info(dev, "Failed to get private DMA tx channel. 
Falling back to generic one\n"); + goto fail_back_rx; + } + + epf_test->dma_chan_tx = dma_chan; + epf_test->dma_private = true; + + init_completion(&epf_test->transfer_complete); + + return 0; + +fail_back_rx: + dma_release_channel(epf_test->dma_chan_rx); + epf_test->dma_chan_tx = NULL; + +fail_back_tx: + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + + dma_chan = dma_request_chan_by_mask(&mask); + if (IS_ERR(dma_chan)) { + ret = PTR_ERR(dma_chan); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get DMA channel\n"); + return ret; + } + init_completion(&epf_test->transfer_complete); + + epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan; + + return 0; +} + +/** + * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel + * @epf_test: the EPF test device that performs data transfer operation + * + * Helper to cleanup EPF test DMA channel. + */ +static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) +{ + if (!epf_test->dma_supported) + return; + + dma_release_channel(epf_test->dma_chan_tx); + if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { + epf_test->dma_chan_tx = NULL; + epf_test->dma_chan_rx = NULL; + return; + } + + dma_release_channel(epf_test->dma_chan_rx); + epf_test->dma_chan_rx = NULL; + + return; +} + +static void pci_epf_test_print_rate(const char *ops, u64 size, + struct timespec64 *start, + struct timespec64 *end, bool dma) +{ + struct timespec64 ts; + u64 rate, ns; + + ts = timespec64_sub(*end, *start); + + /* convert both size (stored in 'rate') and time in terms of 'ns' */ + ns = timespec64_to_ns(&ts); + rate = size * NSEC_PER_SEC; + + /* Divide both size (stored in 'rate') and ns by a common factor */ + while (ns > UINT_MAX) { + rate >>= 1; + ns >>= 1; + } + + if (!ns) + return; + + /* calculate the rate */ + do_div(rate, (uint32_t)ns); + + pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t" + "Rate: %llu KB/s\n", ops, size, dma ? 
"YES" : "NO", + (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024); +} + +static int pci_epf_test_copy(struct pci_epf_test *epf_test) +{ + int ret; + bool use_dma; + void __iomem *src_addr; + void __iomem *dst_addr; + phys_addr_t src_phys_addr; + phys_addr_t dst_phys_addr; + struct timespec64 start, end; + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + + src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); + if (!src_addr) { + dev_err(dev, "Failed to allocate source address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + ret = -ENOMEM; + goto err; + } + + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, + reg->src_addr, reg->size); + if (ret) { + dev_err(dev, "Failed to map source address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + goto err_src_addr; + } + + dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); + if (!dst_addr) { + dev_err(dev, "Failed to allocate destination address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + ret = -ENOMEM; + goto err_src_map_addr; + } + + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, + reg->dst_addr, reg->size); + if (ret) { + dev_err(dev, "Failed to map destination address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + goto err_dst_addr; + } + + ktime_get_ts64(&start); + use_dma = !!(reg->flags & FLAG_USE_DMA); + if (use_dma) { + if (!epf_test->dma_supported) { + dev_err(dev, "Cannot transfer data using DMA\n"); + ret = -EINVAL; + goto err_map_addr; + } + + if (epf_test->dma_private) { + dev_err(dev, "Cannot transfer data using DMA\n"); + ret = -EINVAL; + goto err_map_addr; + } + + ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, + src_phys_addr, reg->size, 0, + DMA_MEM_TO_MEM); + if (ret) + dev_err(dev, "Data transfer failed\n"); + } else { + void *buf; + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + memcpy_fromio(buf, src_addr, reg->size); + memcpy_toio(dst_addr, buf, reg->size); + kfree(buf); + } + ktime_get_ts64(&end); + pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); + +err_map_addr: + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); + +err_dst_addr: + pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); + +err_src_map_addr: + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); + +err_src_addr: + pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); + +err: + return ret; +} + +static int pci_epf_test_read(struct pci_epf_test *epf_test) +{ + int ret; + void __iomem *src_addr; + void *buf; + u32 crc32; + bool use_dma; + phys_addr_t phys_addr; + phys_addr_t dst_phys_addr; + struct timespec64 start, end; + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + struct device *dma_dev = epf->epc->dev.parent; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + + src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); + if (!src_addr) { + dev_err(dev, "Failed to allocate address\n"); + reg->status = STATUS_SRC_ADDR_INVALID; + ret = -ENOMEM; + goto err; + } + + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, + reg->src_addr, reg->size); + if (ret) { + dev_err(dev, "Failed to map address\n"); + reg->status = 
STATUS_SRC_ADDR_INVALID; + goto err_addr; + } + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + use_dma = !!(reg->flags & FLAG_USE_DMA); + if (use_dma) { + if (!epf_test->dma_supported) { + dev_err(dev, "Cannot transfer data using DMA\n"); + ret = -EINVAL; + goto err_dma_map; + } + + dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dst_phys_addr)) { + dev_err(dev, "Failed to map destination buffer addr\n"); + ret = -ENOMEM; + goto err_dma_map; + } + + ktime_get_ts64(&start); + ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, + phys_addr, reg->size, + reg->src_addr, DMA_DEV_TO_MEM); + if (ret) + dev_err(dev, "Data transfer failed\n"); + ktime_get_ts64(&end); + + dma_unmap_single(dma_dev, dst_phys_addr, reg->size, + DMA_FROM_DEVICE); + } else { + ktime_get_ts64(&start); + memcpy_fromio(buf, src_addr, reg->size); + ktime_get_ts64(&end); + } + + pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma); + + crc32 = crc32_le(~0, buf, reg->size); + if (crc32 != reg->checksum) + ret = -EIO; + +err_dma_map: + kfree(buf); + +err_map_addr: + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); + +err_addr: + pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); + +err: + return ret; +} + +static int pci_epf_test_write(struct pci_epf_test *epf_test) +{ + int ret; + void __iomem *dst_addr; + void *buf; + bool use_dma; + phys_addr_t phys_addr; + phys_addr_t src_phys_addr; + struct timespec64 start, end; + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + struct device *dma_dev = epf->epc->dev.parent; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + + dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); + if (!dst_addr) { + dev_err(dev, "Failed to allocate address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + ret = -ENOMEM; + goto err; + } + + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, + reg->dst_addr, reg->size); + if (ret) { + dev_err(dev, "Failed to map address\n"); + reg->status = STATUS_DST_ADDR_INVALID; + goto err_addr; + } + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + get_random_bytes(buf, reg->size); + reg->checksum = crc32_le(~0, buf, reg->size); + + use_dma = !!(reg->flags & FLAG_USE_DMA); + if (use_dma) { + if (!epf_test->dma_supported) { + dev_err(dev, "Cannot transfer data using DMA\n"); + ret = -EINVAL; + goto err_dma_map; + } + + src_phys_addr = dma_map_single(dma_dev, buf, reg->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, src_phys_addr)) { + dev_err(dev, "Failed to map source buffer addr\n"); + ret = -ENOMEM; + goto err_dma_map; + } + + ktime_get_ts64(&start); + + ret = pci_epf_test_data_transfer(epf_test, phys_addr, + src_phys_addr, reg->size, + reg->dst_addr, + DMA_MEM_TO_DEV); + if (ret) + dev_err(dev, "Data transfer failed\n"); + ktime_get_ts64(&end); + + dma_unmap_single(dma_dev, src_phys_addr, reg->size, + DMA_TO_DEVICE); + } else { + ktime_get_ts64(&start); + memcpy_toio(dst_addr, buf, reg->size); + ktime_get_ts64(&end); + } + + pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma); + + /* + * wait 1ms inorder for the write to complete. Without this delay L3 + * error in observed in the host system. 
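+	 * The usleep_range(1000, 2000) below therefore sleeps for between
+	 * 1 ms and 2 ms as a best-effort settling window; it works around
+	 * the issue rather than providing an ordering guarantee.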
+ */ + usleep_range(1000, 2000); + +err_dma_map: + kfree(buf); + +err_map_addr: + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); + +err_addr: + pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); + +err: + return ret; +} + +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type, + u16 irq) +{ + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + + reg->status |= STATUS_IRQ_RAISED; + + switch (irq_type) { + case IRQ_TYPE_LEGACY: + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, + PCI_EPC_IRQ_LEGACY, 0); + break; + case IRQ_TYPE_MSI: + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, + PCI_EPC_IRQ_MSI, irq); + break; + case IRQ_TYPE_MSIX: + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, + PCI_EPC_IRQ_MSIX, irq); + break; + default: + dev_err(dev, "Failed to raise IRQ, unknown type\n"); + break; + } +} + +static void pci_epf_test_cmd_handler(struct work_struct *work) +{ + int ret; + int count; + u32 command; + struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, + cmd_handler.work); + struct pci_epf *epf = epf_test->epf; + struct device *dev = &epf->dev; + struct pci_epc *epc = epf->epc; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + + command = reg->command; + if (!command) + goto reset_handler; + + reg->command = 0; + reg->status = 0; + + if (reg->irq_type > IRQ_TYPE_MSIX) { + dev_err(dev, "Failed to detect IRQ type\n"); + goto reset_handler; + } + + if (command & COMMAND_RAISE_LEGACY_IRQ) { + reg->status = STATUS_IRQ_RAISED; + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, + PCI_EPC_IRQ_LEGACY, 0); + goto reset_handler; + } + + if (command & COMMAND_WRITE) { + ret = pci_epf_test_write(epf_test); + if (ret) + reg->status |= STATUS_WRITE_FAIL; + else + reg->status |= STATUS_WRITE_SUCCESS; + pci_epf_test_raise_irq(epf_test, reg->irq_type, + reg->irq_number); + goto reset_handler; + } + + if (command & COMMAND_READ) { + ret = pci_epf_test_read(epf_test); + if (!ret) + reg->status |= STATUS_READ_SUCCESS; + else + reg->status |= STATUS_READ_FAIL; + pci_epf_test_raise_irq(epf_test, reg->irq_type, + reg->irq_number); + goto reset_handler; + } + + if (command & COMMAND_COPY) { + ret = pci_epf_test_copy(epf_test); + if (!ret) + reg->status |= STATUS_COPY_SUCCESS; + else + reg->status |= STATUS_COPY_FAIL; + pci_epf_test_raise_irq(epf_test, reg->irq_type, + reg->irq_number); + goto reset_handler; + } + + if (command & COMMAND_RAISE_MSI_IRQ) { + count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); + if (reg->irq_number > count || count <= 0) + goto reset_handler; + reg->status = STATUS_IRQ_RAISED; + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, + PCI_EPC_IRQ_MSI, reg->irq_number); + goto reset_handler; + } + + if (command & COMMAND_RAISE_MSIX_IRQ) { + count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); + if (reg->irq_number > count || count <= 0) + goto reset_handler; + reg->status = STATUS_IRQ_RAISED; + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, + PCI_EPC_IRQ_MSIX, reg->irq_number); + goto reset_handler; + } + +reset_handler: + queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, + msecs_to_jiffies(1)); +} + +static void pci_epf_test_unbind(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + struct pci_epc 
*epc = epf->epc; + struct pci_epf_bar *epf_bar; + int bar; + + cancel_delayed_work(&epf_test->cmd_handler); + pci_epf_test_clean_dma_chan(epf_test); + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + epf_bar = &epf->bar[bar]; + + if (epf_test->reg[bar]) { + pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, + epf_bar); + pci_epf_free_space(epf, epf_test->reg[bar], bar, + PRIMARY_INTERFACE); + } + } +} + +static int pci_epf_test_set_bar(struct pci_epf *epf) +{ + int bar, add; + int ret; + struct pci_epf_bar *epf_bar; + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + const struct pci_epc_features *epc_features; + + epc_features = epf_test->epc_features; + + for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { + epf_bar = &epf->bar[bar]; + /* + * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 + * if the specific implementation required a 64-bit BAR, + * even if we only requested a 32-bit BAR. + */ + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; + + if (!!(epc_features->reserved_bar & (1 << bar))) + continue; + + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, + epf_bar); + if (ret) { + pci_epf_free_space(epf, epf_test->reg[bar], bar, + PRIMARY_INTERFACE); + dev_err(dev, "Failed to set BAR%d\n", bar); + if (bar == test_reg_bar) + return ret; + } + } + + return 0; +} + +static int pci_epf_test_core_init(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + struct pci_epf_header *header = epf->header; + const struct pci_epc_features *epc_features; + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + bool msix_capable = false; + bool msi_capable = true; + int ret; + + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); + if (epc_features) { + msix_capable = epc_features->msix_capable; + msi_capable = epc_features->msi_capable; + } + + if (epf->vfunc_no <= 1) { + ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header); + if (ret) { + dev_err(dev, "Configuration header write failed\n"); + return ret; + } + } + + ret = pci_epf_test_set_bar(epf); + if (ret) + return ret; + + if (msi_capable) { + ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, + epf->msi_interrupts); + if (ret) { + dev_err(dev, "MSI configuration failed\n"); + return ret; + } + } + + if (msix_capable) { + ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, + epf->msix_interrupts, + epf_test->test_reg_bar, + epf_test->msix_table_offset); + if (ret) { + dev_err(dev, "MSI-X configuration failed\n"); + return ret; + } + } + + return 0; +} + +static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct pci_epf *epf = container_of(nb, struct pci_epf, nb); + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + int ret; + + switch (val) { + case CORE_INIT: + ret = pci_epf_test_core_init(epf); + if (ret) + return NOTIFY_BAD; + break; + + case LINK_UP: + queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, + msecs_to_jiffies(1)); + break; + + default: + dev_err(&epf->dev, "Invalid EPF test notifier event\n"); + return NOTIFY_BAD; + } + + return NOTIFY_OK; +} + +static int pci_epf_test_alloc_space(struct pci_epf *epf) +{ + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + struct device *dev = &epf->dev; + struct pci_epf_bar *epf_bar; + size_t msix_table_size = 0; + size_t test_reg_bar_size; + size_t pba_size = 0; + bool msix_capable; + 
void *base; + int bar, add; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + const struct pci_epc_features *epc_features; + size_t test_reg_size; + + epc_features = epf_test->epc_features; + + test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); + + msix_capable = epc_features->msix_capable; + if (msix_capable) { + msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; + epf_test->msix_table_offset = test_reg_bar_size; + /* Align to QWORD or 8 Bytes */ + pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); + } + test_reg_size = test_reg_bar_size + msix_table_size + pba_size; + + if (epc_features->bar_fixed_size[test_reg_bar]) { + if (test_reg_size > bar_size[test_reg_bar]) + return -ENOMEM; + test_reg_size = bar_size[test_reg_bar]; + } + + base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, + epc_features->align, PRIMARY_INTERFACE); + if (!base) { + dev_err(dev, "Failed to allocated register space\n"); + return -ENOMEM; + } + epf_test->reg[test_reg_bar] = base; + + for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { + epf_bar = &epf->bar[bar]; + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; + + if (bar == test_reg_bar) + continue; + + if (!!(epc_features->reserved_bar & (1 << bar))) + continue; + + base = pci_epf_alloc_space(epf, bar_size[bar], bar, + epc_features->align, + PRIMARY_INTERFACE); + if (!base) + dev_err(dev, "Failed to allocate space for BAR%d\n", + bar); + epf_test->reg[bar] = base; + } + + return 0; +} + +static void pci_epf_configure_bar(struct pci_epf *epf, + const struct pci_epc_features *epc_features) +{ + struct pci_epf_bar *epf_bar; + bool bar_fixed_64bit; + int i; + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + epf_bar = &epf->bar[i]; + bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i)); + if (bar_fixed_64bit) + epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + if (epc_features->bar_fixed_size[i]) + bar_size[i] = epc_features->bar_fixed_size[i]; + } +} + +static int pci_epf_test_bind(struct pci_epf *epf) +{ + int ret; + struct pci_epf_test *epf_test = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features; + enum pci_barno test_reg_bar = BAR_0; + struct pci_epc *epc = epf->epc; + bool linkup_notifier = false; + bool core_init_notifier = false; + + if (WARN_ON_ONCE(!epc)) + return -EINVAL; + + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); + if (!epc_features) { + dev_err(&epf->dev, "epc_features not implemented\n"); + return -EOPNOTSUPP; + } + + linkup_notifier = epc_features->linkup_notifier; + core_init_notifier = epc_features->core_init_notifier; + test_reg_bar = pci_epc_get_first_free_bar(epc_features); + if (test_reg_bar < 0) + return -EINVAL; + pci_epf_configure_bar(epf, epc_features); + + epf_test->test_reg_bar = test_reg_bar; + epf_test->epc_features = epc_features; + + ret = pci_epf_test_alloc_space(epf); + if (ret) + return ret; + + if (!core_init_notifier) { + ret = pci_epf_test_core_init(epf); + if (ret) + return ret; + } + + epf_test->dma_supported = true; + + ret = pci_epf_test_init_dma_chan(epf_test); + if (ret) + epf_test->dma_supported = false; + + if (linkup_notifier || core_init_notifier) { + epf->nb.notifier_call = pci_epf_test_notifier; + pci_epc_register_notifier(epc, &epf->nb); + } else { + queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); + } + + return 0; +} + +static const struct pci_epf_device_id pci_epf_test_ids[] = { + { + .name = "pci_epf_test", + }, + {}, +}; + +static int pci_epf_test_probe(struct pci_epf *epf) +{ + 
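/* Per-function test state is devm-allocated below, so it lives for the
+	 * lifetime of the EPF device and needs no explicit kfree(). */
+	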
struct pci_epf_test *epf_test; + struct device *dev = &epf->dev; + + epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); + if (!epf_test) + return -ENOMEM; + + epf->header = &test_header; + epf_test->epf = epf; + + INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); + + epf_set_drvdata(epf, epf_test); + return 0; +} + +static struct pci_epf_ops ops = { + .unbind = pci_epf_test_unbind, + .bind = pci_epf_test_bind, +}; + +static struct pci_epf_driver test_driver = { + .driver.name = "pci_epf_test", + .probe = pci_epf_test_probe, + .id_table = pci_epf_test_ids, + .ops = &ops, + .owner = THIS_MODULE, +}; + +static int __init pci_epf_test_init(void) +{ + int ret; + + kpcitest_workqueue = alloc_workqueue("kpcitest", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!kpcitest_workqueue) { + pr_err("Failed to allocate the kpcitest work queue\n"); + return -ENOMEM; + } + + ret = pci_epf_register_driver(&test_driver); + if (ret) { + destroy_workqueue(kpcitest_workqueue); + pr_err("Failed to register pci epf test driver --> %d\n", ret); + return ret; + } + + return 0; +} +module_init(pci_epf_test_init); + +static void __exit pci_epf_test_exit(void) +{ + if (kpcitest_workqueue) + destroy_workqueue(kpcitest_workqueue); + pci_epf_unregister_driver(&test_driver); +} +module_exit(pci_epf_test_exit); + +MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c new file mode 100644 index 000000000..8c6931210 --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -0,0 +1,1468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Endpoint Function Driver to implement Non-Transparent Bridge functionality + * Between PCI RC and EP + * + * Copyright (C) 2020 Texas Instruments + * Copyright (C) 2022 NXP + * + * Based on pci-epf-ntb.c + * Author: Frank Li <Frank.Li@nxp.com> + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +/* + * +------------+ +---------------------------------------+ + * | | | | + * +------------+ | +--------------+ + * | NTB | | | NTB | + * | NetDev | | | NetDev | + * +------------+ | +--------------+ + * | NTB | | | NTB | + * | Transfer | | | Transfer | + * +------------+ | +--------------+ + * | | | | | + * | PCI NTB | | | | + * | EPF | | | | + * | Driver | | | PCI Virtual | + * | | +---------------+ | NTB Driver | + * | | | PCI EP NTB |<------>| | + * | | | FN Driver | | | + * +------------+ +---------------+ +--------------+ + * | | | | | | + * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI | + * | | PCI | | | Bus | + * +------------+ +---------------+--------+--------------+ + * PCIe Root Port PCI EP + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/ntb.h> + +static struct workqueue_struct *kpcintb_workqueue; + +#define COMMAND_CONFIGURE_DOORBELL 1 +#define COMMAND_TEARDOWN_DOORBELL 2 +#define COMMAND_CONFIGURE_MW 3 +#define COMMAND_TEARDOWN_MW 4 +#define COMMAND_LINK_UP 5 +#define COMMAND_LINK_DOWN 6 + +#define COMMAND_STATUS_OK 1 +#define COMMAND_STATUS_ERROR 2 + +#define LINK_STATUS_UP BIT(0) + +#define SPAD_COUNT 64 +#define DB_COUNT 4 +#define NTB_MW_OFFSET 2 +#define DB_COUNT_MASK GENMASK(15, 0) +#define MSIX_ENABLE BIT(16) +#define MAX_DB_COUNT 32 +#define MAX_MW 4 + +enum epf_ntb_bar { + BAR_CONFIG, + BAR_DB, + BAR_MW0, + BAR_MW1, + BAR_MW2, 
+}; + +/* + * +--------------------------------------------------+ Base + * | | + * | | + * | | + * | Common Control Register | + * | | + * | | + * | | + * +-----------------------+--------------------------+ Base+span_offset + * | | | + * | Peer Span Space | Span Space | + * | | | + * | | | + * +-----------------------+--------------------------+ Base+span_offset + * | | | +span_count * 4 + * | | | + * | Span Space | Peer Span Space | + * | | | + * +-----------------------+--------------------------+ + * Virtual PCI PCIe Endpoint + * NTB Driver NTB Driver + */ +struct epf_ntb_ctrl { + u32 command; + u32 argument; + u16 command_status; + u16 link_status; + u32 topology; + u64 addr; + u64 size; + u32 num_mws; + u32 reserved; + u32 spad_offset; + u32 spad_count; + u32 db_entry_size; + u32 db_data[MAX_DB_COUNT]; + u32 db_offset[MAX_DB_COUNT]; +} __packed; + +struct epf_ntb { + struct ntb_dev ntb; + struct pci_epf *epf; + struct config_group group; + + u32 num_mws; + u32 db_count; + u32 spad_count; + u64 mws_size[MAX_MW]; + u64 db; + u32 vbus_number; + u16 vntb_pid; + u16 vntb_vid; + + bool linkup; + u32 spad_size; + + enum pci_barno epf_ntb_bar[6]; + + struct epf_ntb_ctrl *reg; + + phys_addr_t epf_db_phy; + void __iomem *epf_db; + + phys_addr_t vpci_mw_phy[MAX_MW]; + void __iomem *vpci_mw_addr[MAX_MW]; + + struct delayed_work cmd_handler; +}; + +#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group) +#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb) + +static struct pci_epf_header epf_ntb_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .baseclass_code = PCI_BASE_CLASS_MEMORY, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +/** + * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host (VHOST) + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @link_up: true or false indicating Link is UP or Down + * + * Once NTB function in HOST invoke ntb_link_enable(), + * this NTB function driver will trigger a link event to VHOST. 
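+ *
+ * A hedged sketch (the client callback name here is illustrative, not
+ * defined in this file) of how an NTB client on VHOST observes the event
+ * raised by ntb_link_event():
+ *
+ *	static void my_link_event(void *ctx)
+ *	{
+ *		struct ntb_dev *ndev = ctx;
+ *
+ *		if (ntb_link_is_up(ndev, NULL, NULL))
+ *			pr_info("vntb link is up\n");
+ *	}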
+ * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) +{ + if (link_up) + ntb->reg->link_status |= LINK_STATUS_UP; + else + ntb->reg->link_status &= ~LINK_STATUS_UP; + + ntb_link_event(&ntb->ntb); + return 0; +} + +/** + * epf_ntb_configure_mw() - Configure the Outbound Address Space for VHOST + * to access the memory window of HOST + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @mw: Index of the memory window (either 0, 1, 2 or 3) + * + * EP Outbound Window + * +--------+ +-----------+ + * | | | | + * | | | | + * | | | | + * | | | | + * | | +-----------+ + * | Virtual| | Memory Win| + * | NTB | -----------> | | + * | Driver | | | + * | | +-----------+ + * | | | | + * | | | | + * +--------+ +-----------+ + * VHOST PCI EP + * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw) +{ + phys_addr_t phys_addr; + u8 func_no, vfunc_no; + u64 addr, size; + int ret = 0; + + phys_addr = ntb->vpci_mw_phy[mw]; + addr = ntb->reg->addr; + size = ntb->reg->size; + + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size); + if (ret) + dev_err(&ntb->epf->epc->dev, + "Failed to map memory window %d address\n", mw); + return ret; +} + +/** + * epf_ntb_teardown_mw() - Teardown the configured OB ATU + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @mw: Index of the memory window (either 0, 1, 2 or 3) + * + * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using + * pci_epc_unmap_addr() + */ +static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw) +{ + pci_epc_unmap_addr(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + ntb->vpci_mw_phy[mw]); +} + +/** + * epf_ntb_cmd_handler() - Handle commands provided by the NTB HOST + * @work: work_struct for the epf_ntb_epc + * + * Workqueue function that gets invoked for the two epf_ntb_epc + * periodically (once every 5ms) to see if it has received any commands + * from NTB HOST. The HOST can send commands to configure doorbell or + * configure memory window or to update link status. 
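+ *
+ * A hedged sketch of the peer side of this protocol (the ctrl pointer and
+ * status variable are illustrative): the host writes the argument first,
+ * then the command, and polls command_status until the endpoint acts:
+ *
+ *	writel(mw_index, &ctrl->argument);
+ *	writel(COMMAND_CONFIGURE_MW, &ctrl->command);
+ *	do {
+ *		status = readw(&ctrl->command_status);
+ *	} while (status != COMMAND_STATUS_OK &&
+ *		 status != COMMAND_STATUS_ERROR);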
+ */ +static void epf_ntb_cmd_handler(struct work_struct *work) +{ + struct epf_ntb_ctrl *ctrl; + u32 command, argument; + struct epf_ntb *ntb; + struct device *dev; + int ret; + int i; + + ntb = container_of(work, struct epf_ntb, cmd_handler.work); + + for (i = 1; i < ntb->db_count; i++) { + if (readl(ntb->epf_db + i * 4)) { + if (readl(ntb->epf_db + i * 4)) + ntb->db |= 1 << (i - 1); + + ntb_db_event(&ntb->ntb, i); + writel(0, ntb->epf_db + i * 4); + } + } + + ctrl = ntb->reg; + command = ctrl->command; + if (!command) + goto reset_handler; + argument = ctrl->argument; + + ctrl->command = 0; + ctrl->argument = 0; + + ctrl = ntb->reg; + dev = &ntb->epf->dev; + + switch (command) { + case COMMAND_CONFIGURE_DOORBELL: + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_DOORBELL: + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_CONFIGURE_MW: + ret = epf_ntb_configure_mw(ntb, argument); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_MW: + epf_ntb_teardown_mw(ntb, argument); + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_LINK_UP: + ntb->linkup = true; + ret = epf_ntb_link_up(ntb, true); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + goto reset_handler; + case COMMAND_LINK_DOWN: + ntb->linkup = false; + ret = epf_ntb_link_up(ntb, false); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + default: + dev_err(dev, "UNKNOWN command: %d\n", command); + break; + } + +reset_handler: + queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler, + msecs_to_jiffies(5)); +} + +/** + * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR + * @ntb: EPC associated with one of the HOST which holds peer's outbound + * address. + * + * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region (removes inbound ATU configuration). While BAR0 is + * the default self scratchpad BAR, an NTB could have other BARs for self + * scratchpad (because of reserved BARs). This function can get the exact BAR + * used for self scratchpad from epf_ntb_bar[BAR_CONFIG]. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. Also note VHOST's peer + * scratchpad is HOST's self scratchpad. + * + * Returns: void + */ +static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + epf_bar = &ntb->epf->bar[barno]; + + pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar); +} + +/** + * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Map BAR0 of EP CONTROLLER which contains the VHOST's config and + * self scratchpad region. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. 
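+ * As in the clear path, the exact BAR is taken from epf_ntb_bar[BAR_CONFIG]
+ * rather than assumed to be BAR0, since reserved BARs can shift it.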
+ * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + u8 func_no, vfunc_no; + struct device *dev; + int ret; + + dev = &ntb->epf->dev; + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + epf_bar = &ntb->epf->bar[barno]; + + ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n"); + return ret; + } + return 0; +} + +/** + * epf_ntb_config_spad_bar_free() - Free the physical memory associated with + * config + scratchpad region + * @ntb: NTB device that facilitates communication between HOST and VHOST + */ +static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb) +{ + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + pci_epf_free_space(ntb->epf, ntb->reg, barno, 0); +} + +/** + * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad + * region + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Allocate the Local Memory mentioned in the above diagram. The size of + * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION + * is obtained from "spad-count" configfs entry. + * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) +{ + size_t align; + enum pci_barno barno; + struct epf_ntb_ctrl *ctrl; + u32 spad_size, ctrl_size; + u64 size; + struct pci_epf *epf = ntb->epf; + struct device *dev = &epf->dev; + u32 spad_count; + void *base; + int i; + const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc, + epf->func_no, + epf->vfunc_no); + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + size = epc_features->bar_fixed_size[barno]; + align = epc_features->align; + + if ((!IS_ALIGNED(size, align))) + return -EINVAL; + + spad_count = ntb->spad_count; + + ctrl_size = sizeof(struct epf_ntb_ctrl); + spad_size = 2 * spad_count * 4; + + if (!align) { + ctrl_size = roundup_pow_of_two(ctrl_size); + spad_size = roundup_pow_of_two(spad_size); + } else { + ctrl_size = ALIGN(ctrl_size, align); + spad_size = ALIGN(spad_size, align); + } + + if (!size) + size = ctrl_size + spad_size; + else if (size < ctrl_size + spad_size) + return -EINVAL; + + base = pci_epf_alloc_space(epf, size, barno, align, 0); + if (!base) { + dev_err(dev, "Config/Status/SPAD alloc region fail\n"); + return -ENOMEM; + } + + ntb->reg = base; + + ctrl = ntb->reg; + ctrl->spad_offset = ctrl_size; + + ctrl->spad_count = spad_count; + ctrl->num_mws = ntb->num_mws; + ntb->spad_size = spad_size; + + ctrl->db_entry_size = 4; + + for (i = 0; i < ntb->db_count; i++) { + ntb->reg->db_data[i] = 1 + i; + ntb->reg->db_offset[i] = 0; + } + + return 0; +} + +/** + * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Configure MSI/MSI-X capability for each interface with number of + * interrupts equal to "db_count" configfs entry. 
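+ *
+ * Note that although the capability check below accepts either MSI or
+ * MSI-X, only the MSI capability is actually programmed (with 16 vectors);
+ * on an MSI-X-only controller no vectors are configured here.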
+ * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_configure_interrupt(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + struct device *dev; + u32 db_count; + int ret; + + dev = &ntb->epf->dev; + + epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); + + if (!(epc_features->msix_capable || epc_features->msi_capable)) { + dev_err(dev, "MSI or MSI-X is required for doorbell\n"); + return -EINVAL; + } + + db_count = ntb->db_count; + if (db_count > MAX_DB_COUNT) { + dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT); + return -EINVAL; + } + + ntb->db_count = db_count; + + if (epc_features->msi_capable) { + ret = pci_epc_set_msi(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + 16); + if (ret) { + dev_err(dev, "MSI configuration failed\n"); + return ret; + } + } + + return 0; +} + +/** + * epf_ntb_db_bar_init() - Configure Doorbell window BARs + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_db_bar_init(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + u32 align; + struct device *dev = &ntb->epf->dev; + int ret; + struct pci_epf_bar *epf_bar; + void __iomem *mw_addr; + enum pci_barno barno; + size_t size = 4 * ntb->db_count; + + epc_features = pci_epc_get_features(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no); + align = epc_features->align; + + if (size < 128) + size = 128; + + if (align) + size = ALIGN(size, align); + else + size = roundup_pow_of_two(size); + + barno = ntb->epf_ntb_bar[BAR_DB]; + + mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0); + if (!mw_addr) { + dev_err(dev, "Failed to allocate OB address\n"); + return -ENOMEM; + } + + ntb->epf_db = mw_addr; + + epf_bar = &ntb->epf->bar[barno]; + + ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "Doorbell BAR set failed\n"); + goto err_alloc_peer_mem; + } + return ret; + +err_alloc_peer_mem: + pci_epf_free_space(ntb->epf, mw_addr, barno, 0); + return -1; +} + +static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws); + +/** + * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory + * allocated in peer's outbound address space + * @ntb: NTB device that facilitates communication between HOST and VHOST + */ +static void epf_ntb_db_bar_clear(struct epf_ntb *ntb) +{ + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_DB]; + pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0); + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); +} + +/** + * epf_ntb_mw_bar_init() - Configure Memory window BARs + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_mw_bar_init(struct epf_ntb *ntb) +{ + int ret = 0; + int i; + u64 size; + enum pci_barno barno; + struct device *dev = &ntb->epf->dev; + + for (i = 0; i < ntb->num_mws; i++) { + size = ntb->mws_size[i]; + barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + + ntb->epf->bar[barno].barno = barno; + ntb->epf->bar[barno].size = size; + ntb->epf->bar[barno].addr = NULL; + ntb->epf->bar[barno].phys_addr = 0; + ntb->epf->bar[barno].flags |= upper_32_bits(size) ? 
+ PCI_BASE_ADDRESS_MEM_TYPE_64 : + PCI_BASE_ADDRESS_MEM_TYPE_32; + + ret = pci_epc_set_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); + if (ret) { + dev_err(dev, "MW set failed\n"); + goto err_alloc_mem; + } + + /* Allocate EPC outbound memory windows to vpci vntb device */ + ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc, + &ntb->vpci_mw_phy[i], + size); + if (!ntb->vpci_mw_addr[i]) { + ret = -ENOMEM; + dev_err(dev, "Failed to allocate source address\n"); + goto err_set_bar; + } + } + + return ret; + +err_set_bar: + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); +err_alloc_mem: + epf_ntb_mw_bar_clear(ntb, i); + return ret; +} + +/** + * epf_ntb_mw_bar_clear() - Clear Memory window BARs + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @num_mws: the number of Memory window BARs that to be cleared + */ +static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws) +{ + enum pci_barno barno; + int i; + + for (i = 0; i < num_mws; i++) { + barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); + + pci_epc_mem_free_addr(ntb->epf->epc, + ntb->vpci_mw_phy[i], + ntb->vpci_mw_addr[i], + ntb->mws_size[i]); + } +} + +/** + * epf_ntb_epc_destroy() - Cleanup NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces + */ +static void epf_ntb_epc_destroy(struct epf_ntb *ntb) +{ + pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0); + pci_epc_put(ntb->epf->epc); +} + +/** + * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB + * constructs (scratchpad region, doorbell, memorywindow) + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + enum pci_barno barno; + enum epf_ntb_bar bar; + struct device *dev; + u32 num_mws; + int i; + + barno = BAR_0; + num_mws = ntb->num_mws; + dev = &ntb->epf->dev; + epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); + + /* These are required BARs which are mandatory for NTB functionality */ + for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + dev_err(dev, "Fail to get NTB function BAR\n"); + return barno; + } + ntb->epf_ntb_bar[bar] = barno; + } + + /* These are optional BARs which don't impact NTB functionality */ + for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + ntb->num_mws = i; + dev_dbg(dev, "BAR not available for > MW%d\n", i + 1); + } + ntb->epf_ntb_bar[bar] = barno; + } + + return 0; +} + +/** + * epf_ntb_epc_init() - Initialize NTB interface + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Wrapper to initialize a particular EPC interface and start the workqueue + * to check for commands from HOST. This function will write to the + * EP controller HW for configuring it. 
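+ *
+ * The steps below mirror the error-unwind order in reverse: config/self
+ * scratchpad BAR, MSI interrupts, doorbell BAR, memory window BARs, then
+ * the configuration header, before the command handler work is queued.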
+ * + * Returns: Zero for success, or an error code in case of failure + */ +static int epf_ntb_epc_init(struct epf_ntb *ntb) +{ + u8 func_no, vfunc_no; + struct pci_epc *epc; + struct pci_epf *epf; + struct device *dev; + int ret; + + epf = ntb->epf; + dev = &epf->dev; + epc = epf->epc; + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = epf_ntb_config_sspad_bar_set(ntb); + if (ret) { + dev_err(dev, "Config/self SPAD BAR init failed"); + return ret; + } + + ret = epf_ntb_configure_interrupt(ntb); + if (ret) { + dev_err(dev, "Interrupt configuration failed\n"); + goto err_config_interrupt; + } + + ret = epf_ntb_db_bar_init(ntb); + if (ret) { + dev_err(dev, "DB BAR init failed\n"); + goto err_db_bar_init; + } + + ret = epf_ntb_mw_bar_init(ntb); + if (ret) { + dev_err(dev, "MW BAR init failed\n"); + goto err_mw_bar_init; + } + + if (vfunc_no <= 1) { + ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header); + if (ret) { + dev_err(dev, "Configuration header write failed\n"); + goto err_write_header; + } + } + + INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler); + queue_work(kpcintb_workqueue, &ntb->cmd_handler.work); + + return 0; + +err_write_header: + epf_ntb_mw_bar_clear(ntb, ntb->num_mws); +err_mw_bar_init: + epf_ntb_db_bar_clear(ntb); +err_db_bar_init: +err_config_interrupt: + epf_ntb_config_sspad_bar_clear(ntb); + + return ret; +} + + +/** + * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces + * @ntb: NTB device that facilitates communication between HOST and VHOST + * + * Wrapper to cleanup all NTB interfaces. + */ +static void epf_ntb_epc_cleanup(struct epf_ntb *ntb) +{ + epf_ntb_db_bar_clear(ntb); + epf_ntb_mw_bar_clear(ntb, ntb->num_mws); +} + +#define EPF_NTB_R(_name) \ +static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + \ + return sprintf(page, "%d\n", ntb->_name); \ +} + +#define EPF_NTB_W(_name) \ +static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + u32 val; \ + int ret; \ + \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + return ret; \ + \ + ntb->_name = val; \ + \ + return len; \ +} + +#define EPF_NTB_MW_R(_name) \ +static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + struct device *dev = &ntb->epf->dev; \ + int win_no; \ + \ + if (sscanf(#_name, "mw%d", &win_no) != 1) \ + return -EINVAL; \ + \ + if (win_no <= 0 || win_no > ntb->num_mws) { \ + dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \ + return -EINVAL; \ + } \ + \ + return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \ +} + +#define EPF_NTB_MW_W(_name) \ +static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + struct device *dev = &ntb->epf->dev; \ + int win_no; \ + u64 val; \ + int ret; \ + \ + ret = kstrtou64(page, 0, &val); \ + if (ret) \ + return ret; \ + \ + if (sscanf(#_name, "mw%d", &win_no) != 1) \ + return -EINVAL; \ + \ + if (win_no <= 0 || win_no > ntb->num_mws) { \ + dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \ + return -EINVAL; \ + } \ + \ + 
+
+static ssize_t epf_ntb_num_mws_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct epf_ntb *ntb = to_epf_ntb(group);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > MAX_MW)
+ return -EINVAL;
+
+ ntb->num_mws = val;
+
+ return len;
+}
+
+EPF_NTB_R(spad_count)
+EPF_NTB_W(spad_count)
+EPF_NTB_R(db_count)
+EPF_NTB_W(db_count)
+EPF_NTB_R(num_mws)
+EPF_NTB_R(vbus_number)
+EPF_NTB_W(vbus_number)
+EPF_NTB_R(vntb_pid)
+EPF_NTB_W(vntb_pid)
+EPF_NTB_R(vntb_vid)
+EPF_NTB_W(vntb_vid)
+EPF_NTB_MW_R(mw1)
+EPF_NTB_MW_W(mw1)
+EPF_NTB_MW_R(mw2)
+EPF_NTB_MW_W(mw2)
+EPF_NTB_MW_R(mw3)
+EPF_NTB_MW_W(mw3)
+EPF_NTB_MW_R(mw4)
+EPF_NTB_MW_W(mw4)
+
+CONFIGFS_ATTR(epf_ntb_, spad_count);
+CONFIGFS_ATTR(epf_ntb_, db_count);
+CONFIGFS_ATTR(epf_ntb_, num_mws);
+CONFIGFS_ATTR(epf_ntb_, mw1);
+CONFIGFS_ATTR(epf_ntb_, mw2);
+CONFIGFS_ATTR(epf_ntb_, mw3);
+CONFIGFS_ATTR(epf_ntb_, mw4);
+CONFIGFS_ATTR(epf_ntb_, vbus_number);
+CONFIGFS_ATTR(epf_ntb_, vntb_pid);
+CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+
+static struct configfs_attribute *epf_ntb_attrs[] = {
+ &epf_ntb_attr_spad_count,
+ &epf_ntb_attr_db_count,
+ &epf_ntb_attr_num_mws,
+ &epf_ntb_attr_mw1,
+ &epf_ntb_attr_mw2,
+ &epf_ntb_attr_mw3,
+ &epf_ntb_attr_mw4,
+ &epf_ntb_attr_vbus_number,
+ &epf_ntb_attr_vntb_pid,
+ &epf_ntb_attr_vntb_vid,
+ NULL,
+};
+
+static const struct config_item_type ntb_group_type = {
+ .ct_attrs = epf_ntb_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/**
+ * epf_ntb_add_cfs() - Add configfs directory specific to NTB
+ * @epf: NTB endpoint function device
+ * @group: A pointer to the config_group structure referencing a group of
+ * config_items of a specific type that belong to a specific sub-system.
+ *
+ * Add a configfs directory specific to NTB. This directory holds
+ * NTB-specific properties such as db_count, spad_count and num_mws.
+ *
+ * Returns: Pointer to config_group
+ */
+static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct config_group *ntb_group = &ntb->group;
+ struct device *dev = &epf->dev;
+
+ config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type);
+
+ return ntb_group;
+}
+
+/*==== Virtual PCI bus driver, which only loads the virtual NTB PCI driver ====*/
+
+static u32 pci_space[] = {
+ 0xffffffff, /* Device ID, Vendor ID */
+ 0, /* Status, Command */
+ 0xffffffff, /* Class code, subclass, prog IF, revision ID */
+ 0x40, /* BIST, header type, latency timer, cache line size */
+ 0, /* BAR 0 */
+ 0, /* BAR 1 */
+ 0, /* BAR 2 */
+ 0, /* BAR 3 */
+ 0, /* BAR 4 */
+ 0, /* BAR 5 */
+ 0, /* CardBus CIS pointer */
+ 0, /* Subsystem ID, subsystem vendor ID */
+ 0, /* Expansion ROM base address */
+ 0, /* Reserved, capabilities pointer */
+ 0, /* Reserved */
+ 0, /* Max_Lat, Min_Gnt, interrupt pin, interrupt line */
+};
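+
+/*
+ * Each u32 above covers 4 bytes of the emulated configuration space, so a
+ * 2-byte read at PCI_VENDOR_ID (offset 0) returns the low 16 bits of
+ * pci_space[0]; epf_ntb_bind() patches that word with the configured
+ * vntb_vid/vntb_pid before the virtual bus is scanned. (Editor's note,
+ * added for illustration.)
+ */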
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ if (devfn == 0) {
+ memcpy(val, ((u8 *)pci_space) + where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return 0;
+}
+
+static struct pci_ops vpci_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+static int vpci_scan_bus(void *sysdata)
+{
+ struct pci_bus *vpci_bus;
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+ if (!vpci_bus) {
+ pr_err("create pci bus failed\n");
+ return -EINVAL;
+ }
+
+ pci_bus_add_devices(vpci_bus);
+
+ return 0;
+}
+
+/*==================== Virtual PCIe NTB driver ==========================*/
+
+static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct epf_ntb *ndev = ntb_ndev(ntb);
+
+ return ndev->num_mws;
+}
+
+static int vntb_epf_spad_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->spad_count;
+}
+
+static int vntb_epf_peer_mw_count(struct ntb_dev *ntb)
+{
+ return ntb_ndev(ntb)->num_mws;
+}
+
+static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb)
+{
+ return BIT_ULL(ntb_ndev(ntb)->db_count) - 1;
+}
+
+static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct pci_epf_bar *epf_bar;
+ enum pci_barno barno;
+ int ret;
+ struct device *dev;
+
+ dev = &ntb->ntb.dev;
+ barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ epf_bar = &ntb->epf->bar[barno];
+ epf_bar->phys_addr = addr;
+ epf_bar->barno = barno;
+ epf_bar->size = size;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
+ if (ret) {
+ dev_err(dev, "failed to set MW translation\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
+{
+ return 0;
+}
+
+static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx,
+ phys_addr_t *base, resource_size_t *size)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (base)
+ *base = ntb->vpci_mw_phy[idx];
+
+ if (size)
+ *size = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static int vntb_epf_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return 0;
+}
+
+static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4;
+ u32 val;
+ void __iomem *base = (void __iomem *)ntb->reg;
+
+ val = readl(base + off + ct + idx * 4);
+ return val;
+}
+
+static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset, ct = ctrl->spad_count * 4;
+ void __iomem *base = (void __iomem *)ntb->reg;
+
+ writel(val, base + off + ct + idx * 4);
+ return 0;
+}
+
+static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = (void __iomem *)ntb->reg;
+ u32 val;
+
+ val = readl(base + off + idx * 4);
+ return val;
+}
+
+static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ struct epf_ntb_ctrl *ctrl = ntb->reg;
+ int off = ctrl->spad_offset;
+ void __iomem *base = (void __iomem *)ntb->reg;
+
+ writel(val, base + off + idx * 4);
+ return 0;
+}
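+
+/*
+ * Editor's sketch of the scratchpad layout implied by the four accessors
+ * above (offsets relative to the control region at ntb->reg):
+ *
+ *   spad_offset                  : peer scratchpads (peer_spad_read/write)
+ *   spad_offset + spad_count * 4 : local scratchpads (spad_read/write)
+ *
+ * so one side's "peer" array is the other side's local array.
+ */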
+
+static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
+{
+ u32 interrupt_num = ffs(db_bits) + 1;
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+ u8 func_no, vfunc_no;
+ int ret;
+
+ func_no = ntb->epf->func_no;
+ vfunc_no = ntb->epf->vfunc_no;
+
+ ret = pci_epc_raise_irq(ntb->epf->epc,
+ func_no,
+ vfunc_no,
+ PCI_EPC_IRQ_MSI,
+ interrupt_num + 1);
+ if (ret)
+ dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
+
+ return ret;
+}
+
+static u64 vntb_epf_db_read(struct ntb_dev *ndev)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->db;
+}
+
+static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ if (addr_align)
+ *addr_align = SZ_4K;
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = ntb->mws_size[idx];
+
+ return 0;
+}
+
+static u64 vntb_epf_link_is_up(struct ntb_dev *ndev,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ return ntb->reg->link_status;
+}
+
+static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits)
+{
+ return 0;
+}
+
+static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
+{
+ struct epf_ntb *ntb = ntb_ndev(ndev);
+
+ ntb->db &= ~db_bits;
+ return 0;
+}
+
+static int vntb_epf_link_disable(struct ntb_dev *ntb)
+{
+ return 0;
+}
+
+static const struct ntb_dev_ops vntb_epf_ops = {
+ .mw_count = vntb_epf_mw_count,
+ .spad_count = vntb_epf_spad_count,
+ .peer_mw_count = vntb_epf_peer_mw_count,
+ .db_valid_mask = vntb_epf_db_valid_mask,
+ .db_set_mask = vntb_epf_db_set_mask,
+ .mw_set_trans = vntb_epf_mw_set_trans,
+ .mw_clear_trans = vntb_epf_mw_clear_trans,
+ .peer_mw_get_addr = vntb_epf_peer_mw_get_addr,
+ .link_enable = vntb_epf_link_enable,
+ .spad_read = vntb_epf_spad_read,
+ .spad_write = vntb_epf_spad_write,
+ .peer_spad_read = vntb_epf_peer_spad_read,
+ .peer_spad_write = vntb_epf_peer_spad_write,
+ .peer_db_set = vntb_epf_peer_db_set,
+ .db_read = vntb_epf_db_read,
+ .mw_get_align = vntb_epf_mw_get_align,
+ .link_is_up = vntb_epf_link_is_up,
+ .db_clear_mask = vntb_epf_db_clear_mask,
+ .db_clear = vntb_epf_db_clear,
+ .link_disable = vntb_epf_link_disable,
+};
+
+static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int ret;
+ struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata;
+ struct device *dev = &pdev->dev;
+
+ ndev->ntb.pdev = pdev;
+ ndev->ntb.topo = NTB_TOPO_NONE;
+ ndev->ntb.ops = &vntb_epf_ops;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return ret;
+ }
+
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+}
+
+static struct pci_device_id pci_vntb_table[] = {
+ {
+ PCI_DEVICE(0xffff, 0xffff),
+ },
+ {},
+};
+
+static struct pci_driver vntb_pci_driver = {
+ .name = "pci-vntb",
+ .id_table = pci_vntb_table,
+ .probe = pci_vntb_probe,
+};
+
+/* ============ PCIe EPF Driver Bind ==================== */
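+
+/*
+ * Flow summary (editor's note): epf_ntb_bind() below patches pci_space[0]
+ * and pci_vntb_table[0] with the configured vendor/device IDs, registers
+ * vntb_pci_driver and calls vpci_scan_bus(). The scan finds the single
+ * emulated device at devfn 0, pci_vntb_probe() matches it and hands the
+ * struct ntb_dev to the NTB core via ntb_register_device().
+ */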
+
+/**
+ * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality
+ * @epf: NTB endpoint function device
+ *
+ * Invoked when the endpoint function is bound to an EPC device. Identify the
+ * BARs, allocate the configuration/scratchpad region, initialize the EP
+ * controller and register the virtual PCI bus that hosts the virtual NTB
+ * device.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_bind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ int ret;
+
+ if (!epf->epc) {
+ dev_dbg(dev, "PRIMARY EPC interface not yet bound\n");
+ return 0;
+ }
+
+ ret = epf_ntb_init_epc_bar(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to identify BARs for NTB\n");
+ goto err_bar_init;
+ }
+
+ ret = epf_ntb_config_spad_bar_alloc(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to allocate BAR memory\n");
+ goto err_bar_alloc;
+ }
+
+ ret = epf_ntb_epc_init(ntb);
+ if (ret) {
+ dev_err(dev, "Failed to initialize EPC\n");
+ goto err_bar_alloc;
+ }
+
+ epf_set_drvdata(epf, ntb);
+
+ pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid;
+ pci_vntb_table[0].vendor = ntb->vntb_vid;
+ pci_vntb_table[0].device = ntb->vntb_pid;
+
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+ dev_err(dev, "failed to register vntb pci driver\n");
+ goto err_bar_alloc;
+ }
+
+ vpci_scan_bus(ntb);
+
+ return 0;
+
+err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+err_bar_init:
+ epf_ntb_epc_destroy(ntb);
+
+ return ret;
+}
+
+/**
+ * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind()
+ * @epf: NTB endpoint function device
+ *
+ * Cleanup the initialization from epf_ntb_bind()
+ */
+static void epf_ntb_unbind(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb = epf_get_drvdata(epf);
+
+ epf_ntb_epc_cleanup(ntb);
+ epf_ntb_config_spad_bar_free(ntb);
+ epf_ntb_epc_destroy(ntb);
+
+ pci_unregister_driver(&vntb_pci_driver);
+}
+
+/* EPF driver ops */
+static struct pci_epf_ops epf_ntb_ops = {
+ .bind = epf_ntb_bind,
+ .unbind = epf_ntb_unbind,
+ .add_cfs = epf_ntb_add_cfs,
+};
+
+/**
+ * epf_ntb_probe() - Probe NTB function driver
+ * @epf: NTB endpoint function device
+ *
+ * Probe NTB function driver when endpoint function bus detects an NTB
+ * endpoint function.
+ *
+ * Returns: Zero for success, or an error code in case of failure
+ */
+static int epf_ntb_probe(struct pci_epf *epf)
+{
+ struct epf_ntb *ntb;
+ struct device *dev;
+
+ dev = &epf->dev;
+
+ ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL);
+ if (!ntb)
+ return -ENOMEM;
+
+ epf->header = &epf_ntb_header;
+ ntb->epf = epf;
+ ntb->vbus_number = 0xff;
+ epf_set_drvdata(epf, ntb);
+
+ dev_info(dev, "pci-ep epf driver loaded\n");
+ return 0;
+}
+
+static const struct pci_epf_device_id epf_ntb_ids[] = {
+ {
+ .name = "pci_epf_vntb",
+ },
+ {},
+};
+
+static struct pci_epf_driver epf_ntb_driver = {
+ .driver.name = "pci_epf_vntb",
+ .probe = epf_ntb_probe,
+ .id_table = epf_ntb_ids,
+ .ops = &epf_ntb_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init epf_ntb_init(void)
+{
+ int ret;
+
+ kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI, 0);
+ if (!kpcintb_workqueue) {
+ pr_err("Failed to allocate the kpcintb workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_epf_register_driver(&epf_ntb_driver);
+ if (ret) {
+ destroy_workqueue(kpcintb_workqueue);
+ pr_err("Failed to register pci epf ntb driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(epf_ntb_init);
+
+static void __exit epf_ntb_exit(void)
+{
+ pci_epf_unregister_driver(&epf_ntb_driver);
+ destroy_workqueue(kpcintb_workqueue);
+}
+module_exit(epf_ntb_exit);
+
+MODULE_DESCRIPTION("PCI EPF NTB DRIVER");
+MODULE_AUTHOR("Frank Li <Frank.li@nxp.com>");
+MODULE_LICENSE("GPL v2");
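+
+/*
+ * Editor's note - typical bring-up (illustrative): load this module, create
+ * and configure the function through the pci_ep configfs interface shown
+ * above, then link it to an endpoint controller. epf_ntb_bind() exposes a
+ * virtual NTB device on the endpoint side to which standard NTB clients
+ * (ntb_transport, for example) can attach once both sides see the link up.
+ */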