From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 drivers/ntb/hw/Kconfig                 |    6 +
 drivers/ntb/hw/Makefile                |    6 +
 drivers/ntb/hw/amd/Kconfig             |    8 +
 drivers/ntb/hw/amd/Makefile            |    2 +
 drivers/ntb/hw/amd/ntb_hw_amd.c        | 1355 +++++++++++++++
 drivers/ntb/hw/amd/ntb_hw_amd.h        |  223 +++
 drivers/ntb/hw/epf/Kconfig             |    6 +
 drivers/ntb/hw/epf/Makefile            |    1 +
 drivers/ntb/hw/epf/ntb_hw_epf.c        |  771 +++++++++
 drivers/ntb/hw/idt/Kconfig             |   31 +
 drivers/ntb/hw/idt/Makefile            |    2 +
 drivers/ntb/hw/idt/ntb_hw_idt.c        | 2912 ++++++++++++++++++++++++++++++++
 drivers/ntb/hw/idt/ntb_hw_idt.h        | 1226 ++++++++++++++
 drivers/ntb/hw/intel/Kconfig           |    8 +
 drivers/ntb/hw/intel/Makefile          |    3 +
 drivers/ntb/hw/intel/ntb_hw_gen1.c     | 2085 +++++++++++++++++++++++
 drivers/ntb/hw/intel/ntb_hw_gen1.h     |  185 ++
 drivers/ntb/hw/intel/ntb_hw_gen3.c     |  627 +++++++
 drivers/ntb/hw/intel/ntb_hw_gen3.h     |  118 ++
 drivers/ntb/hw/intel/ntb_hw_gen4.c     |  592 +++++++
 drivers/ntb/hw/intel/ntb_hw_gen4.h     |  131 ++
 drivers/ntb/hw/intel/ntb_hw_intel.h    |  238 +++
 drivers/ntb/hw/mscc/Kconfig            |   10 +
 drivers/ntb/hw/mscc/Makefile           |    2 +
 drivers/ntb/hw/mscc/ntb_hw_switchtec.c | 1577 +++++++++++++++++
 25 files changed, 12125 insertions(+)
 create mode 100644 drivers/ntb/hw/Kconfig
 create mode 100644 drivers/ntb/hw/Makefile
 create mode 100644 drivers/ntb/hw/amd/Kconfig
 create mode 100644 drivers/ntb/hw/amd/Makefile
 create mode 100644 drivers/ntb/hw/amd/ntb_hw_amd.c
 create mode 100644 drivers/ntb/hw/amd/ntb_hw_amd.h
 create mode 100644 drivers/ntb/hw/epf/Kconfig
 create mode 100644 drivers/ntb/hw/epf/Makefile
 create mode 100644 drivers/ntb/hw/epf/ntb_hw_epf.c
 create mode 100644 drivers/ntb/hw/idt/Kconfig
 create mode 100644 drivers/ntb/hw/idt/Makefile
 create mode 100644 drivers/ntb/hw/idt/ntb_hw_idt.c
 create mode 100644 drivers/ntb/hw/idt/ntb_hw_idt.h
 create mode 100644 drivers/ntb/hw/intel/Kconfig
 create mode 100644 drivers/ntb/hw/intel/Makefile
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_gen1.c
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_gen1.h
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_gen3.c
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_gen3.h
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_gen4.c
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_gen4.h
 create mode 100644 drivers/ntb/hw/intel/ntb_hw_intel.h
 create mode 100644 drivers/ntb/hw/mscc/Kconfig
 create mode 100644 drivers/ntb/hw/mscc/Makefile
 create mode 100644 drivers/ntb/hw/mscc/ntb_hw_switchtec.c

diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
new file mode 100644
index 0000000000..c325be526b
--- /dev/null
+++ b/drivers/ntb/hw/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/ntb/hw/amd/Kconfig"
+source "drivers/ntb/hw/idt/Kconfig"
+source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/epf/Kconfig"
+source "drivers/ntb/hw/mscc/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
new file mode 100644
index 0000000000..223ca592b5
--- /dev/null
+++ b/drivers/ntb/hw/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NTB_AMD) += amd/
+obj-$(CONFIG_NTB_IDT) += idt/
+obj-$(CONFIG_NTB_INTEL) += intel/
+obj-$(CONFIG_NTB_EPF) += epf/
+obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/amd/Kconfig b/drivers/ntb/hw/amd/Kconfig
new file mode 100644
index 0000000000..dd9fb9e5c8
--- /dev/null
+++ b/drivers/ntb/hw/amd/Kconfig
@@ -0,0
+1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NTB_AMD + tristate "AMD Non-Transparent Bridge support" + depends on X86_64 + help + This driver supports AMD NTB on capable Zeppelin hardware. + + If unsure, say N. diff --git a/drivers/ntb/hw/amd/Makefile b/drivers/ntb/hw/amd/Makefile new file mode 100644 index 0000000000..7c23b4a7f3 --- /dev/null +++ b/drivers/ntb/hw/amd/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NTB_AMD) += ntb_hw_amd.o diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c new file mode 100644 index 0000000000..d687e8c2cc --- /dev/null +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -0,0 +1,1355 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of AMD Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * AMD PCIe NTB Linux driver + * + * Contact Information: + * Xiangliang Yu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ntb_hw_amd.h" + +#define NTB_NAME "ntb_hw_amd" +#define NTB_DESC "AMD(R) PCI-E Non-Transparent Bridge Driver" +#define NTB_VER "1.0" + +MODULE_DESCRIPTION(NTB_DESC); +MODULE_VERSION(NTB_VER); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("AMD Inc."); + +static const struct file_operations amd_ntb_debugfs_info; +static struct dentry *debugfs_dir; + +static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx) +{ + if (idx < 0 || idx > ndev->mw_count) + return -EINVAL; + + return ndev->dev_data->mw_idx << idx; +} + +static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + return ntb_ndev(ntb)->mw_count; +} + +static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (addr_align) + *addr_align = SZ_4K; + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = pci_resource_len(ndev->ntb.pdev, bar); + + return 0; +} + +static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + unsigned long xlat_reg, limit_reg = 0; + resource_size_t mw_size; + void __iomem *mmio, *peer_mmio; + u64 base_addr, limit, reg_val; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + mw_size = pci_resource_len(ntb->pdev, bar); + + /* make sure the range fits in the usable mw size */ + if (size > mw_size) + return -EINVAL; + + mmio = ndev->self_mmio; + peer_mmio = ndev->peer_mmio; + + base_addr = pci_resource_start(ntb->pdev, bar); + + if (bar != 1) { + xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2); + limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2); + + /* Set the limit if supported */ + limit = size; + + /* set and verify setting the translation address */ + write64(addr, peer_mmio + xlat_reg); + reg_val = read64(peer_mmio + xlat_reg); + if (reg_val != addr) { + write64(0, peer_mmio + xlat_reg); + return -EIO; + } + + /* set and verify setting the limit */ + write64(limit, peer_mmio + limit_reg); + reg_val = read64(peer_mmio + limit_reg); + if (reg_val != limit) { + write64(base_addr, mmio + limit_reg); + write64(0, peer_mmio + xlat_reg); + return -EIO; + } + } else { + xlat_reg = AMD_BAR1XLAT_OFFSET; + limit_reg = AMD_BAR1LMT_OFFSET; + + /* Set the limit if supported */ + limit = size; + + /* set and verify setting the translation address */ + write64(addr, peer_mmio + xlat_reg); + reg_val = read64(peer_mmio + xlat_reg); + if (reg_val != addr) { + write64(0, peer_mmio + xlat_reg); + return -EIO; + } + + /* set and verify setting the limit */ + writel(limit, peer_mmio + limit_reg); + reg_val = readl(peer_mmio + limit_reg); + if (reg_val != limit) { + writel(base_addr, mmio + limit_reg); + writel(0, peer_mmio + xlat_reg); + return -EIO; + } + } + + return 0; +} + +static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev) +{ + struct pci_dev *pdev = NULL; + struct pci_dev *pci_swds = NULL; + struct pci_dev *pci_swus = NULL; + u32 stat; + int rc; + + if (ndev->ntb.topo == 
NTB_TOPO_SEC) { + /* Locate the pointer to Downstream Switch for this device */ + pci_swds = pci_upstream_bridge(ndev->ntb.pdev); + if (pci_swds) { + /* + * Locate the pointer to Upstream Switch for + * the Downstream Switch. + */ + pci_swus = pci_upstream_bridge(pci_swds); + if (pci_swus) { + rc = pcie_capability_read_dword(pci_swus, + PCI_EXP_LNKCTL, + &stat); + if (rc) + return 0; + } else { + return 0; + } + } else { + return 0; + } + } else if (ndev->ntb.topo == NTB_TOPO_PRI) { + /* + * For NTB primary, we simply read the Link Status and control + * register of the NTB device itself. + */ + pdev = ndev->ntb.pdev; + rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat); + if (rc) + return 0; + } else { + /* Catch all for everything else */ + return 0; + } + + ndev->lnk_sta = stat; + + return 1; +} + +static int amd_link_is_up(struct amd_ntb_dev *ndev) +{ + int ret; + + /* + * We consider the link to be up under two conditions: + * + * - When a link-up event is received. This is indicated by + * AMD_LINK_UP_EVENT set in peer_sta. + * - When driver on both sides of the link have been loaded. + * This is indicated by bit 1 being set in the peer + * SIDEINFO register. + * + * This function should return 1 when the latter of the above + * two conditions is true. + * + * Now consider the sequence of events - Link-Up event occurs, + * then the peer side driver loads. In this case, we would have + * received LINK_UP event and bit 1 of peer SIDEINFO is also + * set. What happens now if the link goes down? Bit 1 of + * peer SIDEINFO remains set, but LINK_DOWN bit is set in + * peer_sta. So we should return 0 from this function. Not only + * that, we clear bit 1 of peer SIDEINFO to 0, since the peer + * side driver did not even get a chance to clear it before + * the link went down. This can be the case of surprise link + * removal. + * + * LINK_UP event will always occur before the peer side driver + * gets loaded the very first time. So there can be a case when + * the LINK_UP event has occurred, but the peer side driver hasn't + * yet loaded. We return 0 in that case. + * + * There is also a special case when the primary side driver is + * unloaded and then loaded again. Since there is no change in + * the status of NTB secondary in this case, there is no Link-Up + * or Link-Down notification received. We recognize this condition + * with peer_sta being set to 0. + * + * If bit 1 of peer SIDEINFO register is not set, then we + * simply return 0 irrespective of the link up or down status + * set in peer_sta. + */ + ret = amd_poll_link(ndev); + if (ret) { + /* + * We need to check the below only for NTB primary. For NTB + * secondary, simply checking the result of PSIDE_INFO + * register will suffice. 
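
The rule described above condenses to a small predicate. A standalone sketch
(plain C; names are illustrative, bit values taken from ntb_hw_amd.h later in
this patch; the driver's side effect of clearing the peer's READY bit in the
link-down branch is omitted here):

#include <stdbool.h>
#include <stdint.h>

#define SIDE_READY      (1u << 1)   /* AMD_SIDE_READY: bit 1 of SIDEINFO */
#define LINK_UP_EVENT   (1u << 5)   /* AMD_LINK_UP_EVENT */
#define LINK_DOWN_EVENT (1u << 6)   /* AMD_LINK_DOWN_EVENT */

/* Mirrors the decision described above, without the register accesses. */
static bool link_considered_up(bool primary, uint32_t peer_sideinfo,
			       uint32_t peer_sta)
{
	if (!(peer_sideinfo & SIDE_READY))
		return false;       /* peer driver not (or no longer) loaded */
	if (!primary)
		return true;        /* secondary trusts READY alone */
	if (peer_sta == 0 || (peer_sta & LINK_UP_EVENT))
		return true;        /* fresh reload, or a link-up was seen */
	return false;               /* e.g. a LINK_DOWN_EVENT is pending */
}
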
+ */ + if (ndev->ntb.topo == NTB_TOPO_PRI) { + if ((ndev->peer_sta & AMD_LINK_UP_EVENT) || + (ndev->peer_sta == 0)) + return ret; + else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) { + /* Clear peer sideinfo register */ + amd_clear_side_info_reg(ndev, true); + + return 0; + } + } else { /* NTB_TOPO_SEC */ + return ret; + } + } + + return 0; +} + +static u64 amd_ntb_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + int ret = 0; + + if (amd_link_is_up(ndev)) { + if (speed) + *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta); + if (width) + *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); + + dev_dbg(&ntb->pdev->dev, "link is up.\n"); + + ret = 1; + } else { + if (speed) + *speed = NTB_SPEED_NONE; + if (width) + *width = NTB_WIDTH_NONE; + + dev_dbg(&ntb->pdev->dev, "link is down.\n"); + } + + return ret; +} + +static int amd_ntb_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + + /* Enable event interrupt */ + ndev->int_mask &= ~AMD_EVENT_INTMASK; + writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); + + if (ndev->ntb.topo == NTB_TOPO_SEC) + return -EINVAL; + dev_dbg(&ntb->pdev->dev, "Enabling Link.\n"); + + return 0; +} + +static int amd_ntb_link_disable(struct ntb_dev *ntb) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + + /* Disable event interrupt */ + ndev->int_mask |= AMD_EVENT_INTMASK; + writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); + + if (ndev->ntb.topo == NTB_TOPO_SEC) + return -EINVAL; + dev_dbg(&ntb->pdev->dev, "Enabling Link.\n"); + + return 0; +} + +static int amd_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + /* The same as for inbound MWs */ + return ntb_ndev(ntb)->mw_count; +} + +static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + int bar; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar); + + if (size) + *size = pci_resource_len(ndev->ntb.pdev, bar); + + return 0; +} + +static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->db_valid_mask; +} + +static int amd_ntb_db_vector_count(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->db_count; +} + +static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + + if (db_vector < 0 || db_vector > ndev->db_count) + return 0; + + return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector); +} + +static u64 amd_ntb_db_read(struct ntb_dev *ntb) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + + return (u64)readw(mmio + AMD_DBSTAT_OFFSET); +} + +static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + + writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET); + + return 0; +} + +static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + unsigned long flags; + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&ndev->db_mask_lock, flags); + ndev->db_mask |= db_bits; + writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET); + spin_unlock_irqrestore(&ndev->db_mask_lock, flags); + + return 0; +} + +static int 
amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + unsigned long flags; + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&ndev->db_mask_lock, flags); + ndev->db_mask &= ~db_bits; + writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET); + spin_unlock_irqrestore(&ndev->db_mask_lock, flags); + + return 0; +} + +static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + + writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET); + + return 0; +} + +static int amd_ntb_spad_count(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->spad_count; +} + +static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + u32 offset; + + if (idx < 0 || idx >= ndev->spad_count) + return 0; + + offset = ndev->self_spad + (idx << 2); + return readl(mmio + AMD_SPAD_OFFSET + offset); +} + +static int amd_ntb_spad_write(struct ntb_dev *ntb, + int idx, u32 val) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + u32 offset; + + if (idx < 0 || idx >= ndev->spad_count) + return -EINVAL; + + offset = ndev->self_spad + (idx << 2); + writel(val, mmio + AMD_SPAD_OFFSET + offset); + + return 0; +} + +static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + u32 offset; + + if (sidx < 0 || sidx >= ndev->spad_count) + return -EINVAL; + + offset = ndev->peer_spad + (sidx << 2); + return readl(mmio + AMD_SPAD_OFFSET + offset); +} + +static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, + int sidx, u32 val) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + void __iomem *mmio = ndev->self_mmio; + u32 offset; + + if (sidx < 0 || sidx >= ndev->spad_count) + return -EINVAL; + + offset = ndev->peer_spad + (sidx << 2); + writel(val, mmio + AMD_SPAD_OFFSET + offset); + + return 0; +} + +static const struct ntb_dev_ops amd_ntb_ops = { + .mw_count = amd_ntb_mw_count, + .mw_get_align = amd_ntb_mw_get_align, + .mw_set_trans = amd_ntb_mw_set_trans, + .peer_mw_count = amd_ntb_peer_mw_count, + .peer_mw_get_addr = amd_ntb_peer_mw_get_addr, + .link_is_up = amd_ntb_link_is_up, + .link_enable = amd_ntb_link_enable, + .link_disable = amd_ntb_link_disable, + .db_valid_mask = amd_ntb_db_valid_mask, + .db_vector_count = amd_ntb_db_vector_count, + .db_vector_mask = amd_ntb_db_vector_mask, + .db_read = amd_ntb_db_read, + .db_clear = amd_ntb_db_clear, + .db_set_mask = amd_ntb_db_set_mask, + .db_clear_mask = amd_ntb_db_clear_mask, + .peer_db_set = amd_ntb_peer_db_set, + .spad_count = amd_ntb_spad_count, + .spad_read = amd_ntb_spad_read, + .spad_write = amd_ntb_spad_write, + .peer_spad_read = amd_ntb_peer_spad_read, + .peer_spad_write = amd_ntb_peer_spad_write, +}; + +static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit) +{ + void __iomem *mmio = ndev->self_mmio; + int reg; + + reg = readl(mmio + AMD_SMUACK_OFFSET); + reg |= bit; + writel(reg, mmio + AMD_SMUACK_OFFSET); +} + +static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) +{ + void __iomem *mmio = ndev->self_mmio; + struct device *dev = &ndev->ntb.pdev->dev; + u32 status; + + status = readl(mmio + AMD_INTSTAT_OFFSET); + if (!(status & AMD_EVENT_INTMASK)) + return; + + dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec); + + status &= 
AMD_EVENT_INTMASK; + switch (status) { + case AMD_PEER_FLUSH_EVENT: + ndev->peer_sta |= AMD_PEER_FLUSH_EVENT; + dev_info(dev, "Flush is done.\n"); + break; + case AMD_PEER_RESET_EVENT: + case AMD_LINK_DOWN_EVENT: + ndev->peer_sta |= status; + if (status == AMD_LINK_DOWN_EVENT) + ndev->peer_sta &= ~AMD_LINK_UP_EVENT; + + amd_ack_smu(ndev, status); + + /* link down first */ + ntb_link_event(&ndev->ntb); + /* polling peer status */ + schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); + + break; + case AMD_PEER_D3_EVENT: + case AMD_PEER_PMETO_EVENT: + case AMD_LINK_UP_EVENT: + ndev->peer_sta |= status; + if (status == AMD_LINK_UP_EVENT) + ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT; + else if (status == AMD_PEER_D3_EVENT) + ndev->peer_sta &= ~AMD_PEER_D0_EVENT; + + amd_ack_smu(ndev, status); + + /* link down */ + ntb_link_event(&ndev->ntb); + + break; + case AMD_PEER_D0_EVENT: + mmio = ndev->peer_mmio; + status = readl(mmio + AMD_PMESTAT_OFFSET); + /* check if this is WAKEUP event */ + if (status & 0x1) + dev_info(dev, "Wakeup is done.\n"); + + ndev->peer_sta |= AMD_PEER_D0_EVENT; + ndev->peer_sta &= ~AMD_PEER_D3_EVENT; + amd_ack_smu(ndev, AMD_PEER_D0_EVENT); + + /* start a timer to poll link status */ + schedule_delayed_work(&ndev->hb_timer, + AMD_LINK_HB_TIMEOUT); + break; + default: + dev_info(dev, "event status = 0x%x.\n", status); + break; + } + + /* Clear the interrupt status */ + writel(status, mmio + AMD_INTSTAT_OFFSET); +} + +static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec) +{ + struct device *dev = &ndev->ntb.pdev->dev; + u64 status; + + status = amd_ntb_db_read(&ndev->ntb); + + dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec); + + /* + * Since we had reserved highest order bit of DB for signaling peer of + * a special event, this is the only status bit we should be concerned + * here now. + */ + if (status & BIT(ndev->db_last_bit)) { + ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit)); + /* send link down event notification */ + ntb_link_event(&ndev->ntb); + + /* + * If we are here, that means the peer has signalled a special + * event which notifies that the peer driver has been + * un-loaded for some reason. Since there is a chance that the + * peer will load its driver again sometime, we schedule link + * polling routine. 
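
Numerically, the carve-out of that reserved doorbell bit (set up in
amd_init_dev() further down) works out as follows. A standalone sketch using
AMD_DB_CNT = 16 from ntb_hw_amd.h; the struct and function names are
illustrative:

#include <stdint.h>
#include <stdio.h>

struct db_cfg {
	int count;              /* doorbells left for normal use */
	int last_bit;           /* reserved "peer driver unloading" bit */
	uint64_t valid_mask;    /* mask covering the usable doorbells */
};

static struct db_cfg reserve_last_db(int hw_db_cnt)
{
	struct db_cfg c;

	c.last_bit = hw_db_cnt - 1;            /* find_last_bit() on 0xffff -> 15 */
	c.count = hw_db_cnt - 1;               /* one fewer usable doorbell */
	c.valid_mask = (1ULL << c.count) - 1;  /* 0x7fff for 16 doorbells */
	return c;
}

int main(void)
{
	struct db_cfg c = reserve_last_db(16);

	/* The reserved bit stays unmasked in hardware so the unload
	 * signal is delivered even before the link is enabled. */
	printf("last_bit=%d count=%d mask=%#llx\n",
	       c.last_bit, c.count, (unsigned long long)c.valid_mask);
	return 0;
}
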
+ */ + schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); + } +} + +static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec) +{ + dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec); + + if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1)) + amd_handle_event(ndev, vec); + + if (vec < AMD_DB_CNT) { + amd_handle_db_event(ndev, vec); + ntb_db_event(&ndev->ntb, vec); + } + + return IRQ_HANDLED; +} + +static irqreturn_t ndev_vec_isr(int irq, void *dev) +{ + struct amd_ntb_vec *nvec = dev; + + return ndev_interrupt(nvec->ndev, nvec->num); +} + +static irqreturn_t ndev_irq_isr(int irq, void *dev) +{ + struct amd_ntb_dev *ndev = dev; + + return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); +} + +static int ndev_init_isr(struct amd_ntb_dev *ndev, + int msix_min, int msix_max) +{ + struct pci_dev *pdev; + int rc, i, msix_count, node; + + pdev = ndev->ntb.pdev; + + node = dev_to_node(&pdev->dev); + + ndev->db_mask = ndev->db_valid_mask; + + /* Try to set up msix irq */ + ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec), + GFP_KERNEL, node); + if (!ndev->vec) + goto err_msix_vec_alloc; + + ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix), + GFP_KERNEL, node); + if (!ndev->msix) + goto err_msix_alloc; + + for (i = 0; i < msix_max; ++i) + ndev->msix[i].entry = i; + + msix_count = pci_enable_msix_range(pdev, ndev->msix, + msix_min, msix_max); + if (msix_count < 0) + goto err_msix_enable; + + /* NOTE: Disable MSIX if msix count is less than 16 because of + * hardware limitation. + */ + if (msix_count < msix_min) { + pci_disable_msix(pdev); + goto err_msix_enable; + } + + for (i = 0; i < msix_count; ++i) { + ndev->vec[i].ndev = ndev; + ndev->vec[i].num = i; + rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, + "ndev_vec_isr", &ndev->vec[i]); + if (rc) + goto err_msix_request; + } + + dev_dbg(&pdev->dev, "Using msix interrupts\n"); + ndev->db_count = msix_min; + ndev->msix_vec_count = msix_max; + return 0; + +err_msix_request: + while (i-- > 0) + free_irq(ndev->msix[i].vector, &ndev->vec[i]); + pci_disable_msix(pdev); +err_msix_enable: + kfree(ndev->msix); +err_msix_alloc: + kfree(ndev->vec); +err_msix_vec_alloc: + ndev->msix = NULL; + ndev->vec = NULL; + + /* Try to set up msi irq */ + rc = pci_enable_msi(pdev); + if (rc) + goto err_msi_enable; + + rc = request_irq(pdev->irq, ndev_irq_isr, 0, + "ndev_irq_isr", ndev); + if (rc) + goto err_msi_request; + + dev_dbg(&pdev->dev, "Using msi interrupts\n"); + ndev->db_count = 1; + ndev->msix_vec_count = 1; + return 0; + +err_msi_request: + pci_disable_msi(pdev); +err_msi_enable: + + /* Try to set up intx irq */ + pci_intx(pdev, 1); + + rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED, + "ndev_irq_isr", ndev); + if (rc) + goto err_intx_request; + + dev_dbg(&pdev->dev, "Using intx interrupts\n"); + ndev->db_count = 1; + ndev->msix_vec_count = 1; + return 0; + +err_intx_request: + return rc; +} + +static void ndev_deinit_isr(struct amd_ntb_dev *ndev) +{ + struct pci_dev *pdev; + void __iomem *mmio = ndev->self_mmio; + int i; + + pdev = ndev->ntb.pdev; + + /* Mask all doorbell interrupts */ + ndev->db_mask = ndev->db_valid_mask; + writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET); + + if (ndev->msix) { + i = ndev->msix_vec_count; + while (i--) + free_irq(ndev->msix[i].vector, &ndev->vec[i]); + pci_disable_msix(pdev); + kfree(ndev->msix); + kfree(ndev->vec); + } else { + free_irq(pdev->irq, ndev); + if (pci_dev_msi_enabled(pdev)) + pci_disable_msi(pdev); + else + pci_intx(pdev, 0); + } +} + +static ssize_t 
ndev_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct amd_ntb_dev *ndev; + void __iomem *mmio; + char *buf; + size_t buf_size; + ssize_t ret, off; + union { u64 v64; u32 v32; u16 v16; } u; + + ndev = filp->private_data; + mmio = ndev->self_mmio; + + buf_size = min(count, 0x800ul); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + off = 0; + + off += scnprintf(buf + off, buf_size - off, + "NTB Device Information:\n"); + + off += scnprintf(buf + off, buf_size - off, + "Connection Topology -\t%s\n", + ntb_topo_string(ndev->ntb.topo)); + + off += scnprintf(buf + off, buf_size - off, + "LNK STA -\t\t%#06x\n", ndev->lnk_sta); + + if (!amd_link_is_up(ndev)) { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tDown\n"); + } else { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tUp\n"); + off += scnprintf(buf + off, buf_size - off, + "Link Speed -\t\tPCI-E Gen %u\n", + NTB_LNK_STA_SPEED(ndev->lnk_sta)); + off += scnprintf(buf + off, buf_size - off, + "Link Width -\t\tx%u\n", + NTB_LNK_STA_WIDTH(ndev->lnk_sta)); + } + + off += scnprintf(buf + off, buf_size - off, + "Memory Window Count -\t%u\n", ndev->mw_count); + off += scnprintf(buf + off, buf_size - off, + "Scratchpad Count -\t%u\n", ndev->spad_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Count -\t%u\n", ndev->db_count); + off += scnprintf(buf + off, buf_size - off, + "MSIX Vector Count -\t%u\n", ndev->msix_vec_count); + + off += scnprintf(buf + off, buf_size - off, + "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); + + u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask -\t\t\t%#06x\n", u.v32); + + u.v32 = readl(mmio + AMD_DBSTAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Bell -\t\t\t%#06x\n", u.v32); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Incoming XLAT:\n"); + + u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "XLAT1 -\t\t%#018llx\n", u.v64); + + u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "XLAT23 -\t\t%#018llx\n", u.v64); + + u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "XLAT45 -\t\t%#018llx\n", u.v64); + + u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "LMT1 -\t\t\t%#06x\n", u.v32); + + u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "LMT23 -\t\t\t%#018llx\n", u.v64); + + u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "LMT45 -\t\t\t%#018llx\n", u.v64); + + ret = simple_read_from_buffer(ubuf, count, offp, buf, off); + kfree(buf); + return ret; +} + +static void ndev_init_debugfs(struct amd_ntb_dev *ndev) +{ + if (!debugfs_dir) { + ndev->debugfs_dir = NULL; + ndev->debugfs_info = NULL; + } else { + ndev->debugfs_dir = + debugfs_create_dir(pci_name(ndev->ntb.pdev), + debugfs_dir); + ndev->debugfs_info = + debugfs_create_file("info", S_IRUSR, + ndev->debugfs_dir, ndev, + &amd_ntb_debugfs_info); + } +} + +static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev) +{ + debugfs_remove_recursive(ndev->debugfs_dir); +} + +static inline void ndev_init_struct(struct amd_ntb_dev *ndev, + struct pci_dev *pdev) +{ + ndev->ntb.pdev = pdev; + ndev->ntb.topo = NTB_TOPO_NONE; + ndev->ntb.ops = &amd_ntb_ops; + 
ndev->int_mask = AMD_EVENT_INTMASK; + spin_lock_init(&ndev->db_mask_lock); +} + +static int amd_poll_link(struct amd_ntb_dev *ndev) +{ + void __iomem *mmio = ndev->peer_mmio; + u32 reg; + + reg = readl(mmio + AMD_SIDEINFO_OFFSET); + reg &= AMD_SIDE_READY; + + dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg); + + ndev->cntl_sta = reg; + + amd_ntb_get_link_status(ndev); + + return ndev->cntl_sta; +} + +static void amd_link_hb(struct work_struct *work) +{ + struct amd_ntb_dev *ndev = hb_ndev(work); + + if (amd_poll_link(ndev)) + ntb_link_event(&ndev->ntb); + + if (!amd_link_is_up(ndev)) + schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); +} + +static int amd_init_isr(struct amd_ntb_dev *ndev) +{ + return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT); +} + +static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer) +{ + void __iomem *mmio = NULL; + unsigned int reg; + + if (peer) + mmio = ndev->peer_mmio; + else + mmio = ndev->self_mmio; + + reg = readl(mmio + AMD_SIDEINFO_OFFSET); + if (!(reg & AMD_SIDE_READY)) { + reg |= AMD_SIDE_READY; + writel(reg, mmio + AMD_SIDEINFO_OFFSET); + } +} + +static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer) +{ + void __iomem *mmio = NULL; + unsigned int reg; + + if (peer) + mmio = ndev->peer_mmio; + else + mmio = ndev->self_mmio; + + reg = readl(mmio + AMD_SIDEINFO_OFFSET); + if (reg & AMD_SIDE_READY) { + reg &= ~AMD_SIDE_READY; + writel(reg, mmio + AMD_SIDEINFO_OFFSET); + readl(mmio + AMD_SIDEINFO_OFFSET); + } +} + +static void amd_init_side_info(struct amd_ntb_dev *ndev) +{ + void __iomem *mmio = ndev->self_mmio; + u32 ntb_ctl; + + amd_set_side_info_reg(ndev, false); + + ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); + ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); + writel(ntb_ctl, mmio + AMD_CNTL_OFFSET); +} + +static void amd_deinit_side_info(struct amd_ntb_dev *ndev) +{ + void __iomem *mmio = ndev->self_mmio; + u32 ntb_ctl; + + amd_clear_side_info_reg(ndev, false); + + ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); + ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); + writel(ntb_ctl, mmio + AMD_CNTL_OFFSET); +} + +static int amd_init_ntb(struct amd_ntb_dev *ndev) +{ + void __iomem *mmio = ndev->self_mmio; + + ndev->mw_count = ndev->dev_data->mw_count; + ndev->spad_count = AMD_SPADS_CNT; + ndev->db_count = AMD_DB_CNT; + + switch (ndev->ntb.topo) { + case NTB_TOPO_PRI: + case NTB_TOPO_SEC: + ndev->spad_count >>= 1; + if (ndev->ntb.topo == NTB_TOPO_PRI) { + ndev->self_spad = 0; + ndev->peer_spad = 0x20; + } else { + ndev->self_spad = 0x20; + ndev->peer_spad = 0; + } + + INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb); + schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); + + break; + default: + dev_err(&ndev->ntb.pdev->dev, + "AMD NTB does not support B2B mode.\n"); + return -EINVAL; + } + + /* Mask event interrupts */ + writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); + + return 0; +} + +static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev) +{ + void __iomem *mmio = ndev->self_mmio; + u32 info; + + info = readl(mmio + AMD_SIDEINFO_OFFSET); + if (info & AMD_SIDE_MASK) + return NTB_TOPO_SEC; + else + return NTB_TOPO_PRI; +} + +static int amd_init_dev(struct amd_ntb_dev *ndev) +{ + void __iomem *mmio = ndev->self_mmio; + struct pci_dev *pdev; + int rc = 0; + + pdev = ndev->ntb.pdev; + + ndev->ntb.topo = amd_get_topo(ndev); + dev_dbg(&pdev->dev, "AMD NTB topo is %s\n", + ntb_topo_string(ndev->ntb.topo)); + + rc = amd_init_ntb(ndev); + if (rc) + return rc; + + rc = amd_init_isr(ndev); + if (rc) { + 
dev_err(&pdev->dev, "fail to init isr.\n"); + return rc; + } + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + /* + * We reserve the highest order bit of the DB register which will + * be used to notify peer when the driver on this side is being + * un-loaded. + */ + ndev->db_last_bit = + find_last_bit((unsigned long *)&ndev->db_valid_mask, + hweight64(ndev->db_valid_mask)); + writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET); + /* + * Since now there is one less bit to account for, the DB count + * and DB mask should be adjusted accordingly. + */ + ndev->db_count -= 1; + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + + /* Enable Link-Up and Link-Down event interrupts */ + ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT); + writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); + + return 0; +} + +static void amd_deinit_dev(struct amd_ntb_dev *ndev) +{ + cancel_delayed_work_sync(&ndev->hb_timer); + + ndev_deinit_isr(ndev); +} + +static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, + struct pci_dev *pdev) +{ + int rc; + + pci_set_drvdata(pdev, ndev); + + rc = pci_enable_device(pdev); + if (rc) + goto err_pci_enable; + + rc = pci_request_regions(pdev, NTB_NAME); + if (rc) + goto err_pci_regions; + + pci_set_master(pdev); + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + goto err_dma_mask; + dev_warn(&pdev->dev, "Cannot DMA highmem\n"); + } + + ndev->self_mmio = pci_iomap(pdev, 0, 0); + if (!ndev->self_mmio) { + rc = -EIO; + goto err_dma_mask; + } + ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET; + + return 0; + +err_dma_mask: + pci_release_regions(pdev); +err_pci_regions: + pci_disable_device(pdev); +err_pci_enable: + pci_set_drvdata(pdev, NULL); + return rc; +} + +static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + + pci_iounmap(pdev, ndev->self_mmio); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int amd_ntb_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct amd_ntb_dev *ndev; + int rc, node; + + node = dev_to_node(&pdev->dev); + + ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); + if (!ndev) { + rc = -ENOMEM; + goto err_ndev; + } + + ndev->dev_data = (struct ntb_dev_data *)id->driver_data; + + ndev_init_struct(ndev, pdev); + + rc = amd_ntb_init_pci(ndev, pdev); + if (rc) + goto err_init_pci; + + rc = amd_init_dev(ndev); + if (rc) + goto err_init_dev; + + /* write side info */ + amd_init_side_info(ndev); + + amd_poll_link(ndev); + + ndev_init_debugfs(ndev); + + rc = ntb_register_device(&ndev->ntb); + if (rc) + goto err_register; + + dev_info(&pdev->dev, "NTB device registered.\n"); + + return 0; + +err_register: + ndev_deinit_debugfs(ndev); + amd_deinit_dev(ndev); +err_init_dev: + amd_ntb_deinit_pci(ndev); +err_init_pci: + kfree(ndev); +err_ndev: + return rc; +} + +static void amd_ntb_pci_remove(struct pci_dev *pdev) +{ + struct amd_ntb_dev *ndev = pci_get_drvdata(pdev); + + /* + * Clear the READY bit in SIDEINFO register before sending DB event + * to the peer. This will make sure that when the peer handles the + * DB event, it correctly reads this bit as being 0. 
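
The ordering matters end to end. A self-contained simulation of the handshake
(plain C, both sides folded into one process; SIDE_READY and DB_LAST stand in
for the SIDEINFO READY bit and BIT(db_last_bit)):

#include <stdint.h>
#include <stdio.h>

static uint32_t sideinfo;       /* stand-in for the shared SIDEINFO reg */
static uint16_t doorbell;       /* stand-in for the doorbell register */

#define SIDE_READY (1u << 1)
#define DB_LAST    (1u << 15)   /* reserved unload-notification bell */

static void local_remove(void)
{
	sideinfo &= ~SIDE_READY;    /* 1: clear READY first... */
	doorbell |= DB_LAST;        /* 2: ...then ring the peer */
}

static void peer_db_handler(void)
{
	if (!(doorbell & DB_LAST))
		return;
	doorbell &= ~DB_LAST;
	/* READY was cleared before the ring, so the peer reads "down". */
	printf("peer sees link %s\n",
	       (sideinfo & SIDE_READY) ? "up" : "down");
}

int main(void)
{
	sideinfo = SIDE_READY;      /* both drivers loaded, link up */
	local_remove();
	peer_db_handler();          /* prints: peer sees link down */
	return 0;
}
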
+ */ + amd_deinit_side_info(ndev); + ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit)); + ntb_unregister_device(&ndev->ntb); + ndev_deinit_debugfs(ndev); + amd_deinit_dev(ndev); + amd_ntb_deinit_pci(ndev); + kfree(ndev); +} + +static void amd_ntb_pci_shutdown(struct pci_dev *pdev) +{ + struct amd_ntb_dev *ndev = pci_get_drvdata(pdev); + + /* Send link down notification */ + ntb_link_event(&ndev->ntb); + + amd_deinit_side_info(ndev); + ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit)); + ntb_unregister_device(&ndev->ntb); + ndev_deinit_debugfs(ndev); + amd_deinit_dev(ndev); + amd_ntb_deinit_pci(ndev); + kfree(ndev); +} + +static const struct file_operations amd_ntb_debugfs_info = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ndev_debugfs_read, +}; + +static const struct ntb_dev_data dev_data[] = { + { /* for device 145b */ + .mw_count = 3, + .mw_idx = 1, + }, + { /* for device 148b */ + .mw_count = 2, + .mw_idx = 2, + }, +}; + +static const struct pci_device_id amd_ntb_pci_tbl[] = { + { PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] }, + { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl); + +static struct pci_driver amd_ntb_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = amd_ntb_pci_tbl, + .probe = amd_ntb_pci_probe, + .remove = amd_ntb_pci_remove, + .shutdown = amd_ntb_pci_shutdown, +}; + +static int __init amd_ntb_pci_driver_init(void) +{ + int ret; + pr_info("%s %s\n", NTB_DESC, NTB_VER); + + if (debugfs_initialized()) + debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + ret = pci_register_driver(&amd_ntb_pci_driver); + if (ret) + debugfs_remove_recursive(debugfs_dir); + + return ret; +} +module_init(amd_ntb_pci_driver_init); + +static void __exit amd_ntb_pci_driver_exit(void) +{ + pci_unregister_driver(&amd_ntb_pci_driver); + debugfs_remove_recursive(debugfs_dir); +} +module_exit(amd_ntb_pci_driver_exit); diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h new file mode 100644 index 0000000000..5f337b1572 --- /dev/null +++ b/drivers/ntb/hw/amd/ntb_hw_amd.h @@ -0,0 +1,223 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of AMD Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * AMD PCIe NTB Linux driver + * + * Contact Information: + * Xiangliang Yu + */ + +#ifndef NTB_HW_AMD_H +#define NTB_HW_AMD_H + +#include +#include + +#define AMD_LINK_HB_TIMEOUT msecs_to_jiffies(1000) +#define NTB_LNK_STA_SPEED_MASK 0x000F0000 +#define NTB_LNK_STA_WIDTH_MASK 0x03F00000 +#define NTB_LNK_STA_SPEED(x) (((x) & NTB_LNK_STA_SPEED_MASK) >> 16) +#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 20) + +#ifndef read64 +#ifdef readq +#define read64 readq +#else +#define read64 _read64 +static inline u64 _read64(void __iomem *mmio) +{ + u64 low, high; + + low = readl(mmio); + high = readl(mmio + sizeof(u32)); + return low | (high << 32); +} +#endif +#endif + +#ifndef write64 +#ifdef writeq +#define write64 writeq +#else +#define write64 _write64 +static inline void _write64(u64 val, void __iomem *mmio) +{ + writel(val, mmio); + writel(val >> 32, mmio + sizeof(u32)); +} +#endif +#endif + +enum { + /* AMD NTB Capability */ + AMD_DB_CNT = 16, + AMD_MSIX_VECTOR_CNT = 24, + AMD_SPADS_CNT = 16, + + /* AMD NTB register offset */ + AMD_CNTL_OFFSET = 0x200, + + /* NTB control register bits */ + PMM_REG_CTL = BIT(21), + SMM_REG_CTL = BIT(20), + SMM_REG_ACC_PATH = BIT(18), + PMM_REG_ACC_PATH = BIT(17), + NTB_CLK_EN = BIT(16), + + AMD_STA_OFFSET = 0x204, + AMD_PGSLV_OFFSET = 0x208, + AMD_SPAD_MUX_OFFSET = 0x20C, + AMD_SPAD_OFFSET = 0x210, + AMD_RSMU_HCID = 0x250, + AMD_RSMU_SIID = 0x254, + AMD_PSION_OFFSET = 0x300, + AMD_SSION_OFFSET = 0x330, + AMD_MMINDEX_OFFSET = 0x400, + AMD_MMDATA_OFFSET = 0x404, + AMD_SIDEINFO_OFFSET = 0x408, + + AMD_SIDE_MASK = BIT(0), + AMD_SIDE_READY = BIT(1), + + /* limit register */ + AMD_ROMBARLMT_OFFSET = 0x410, + AMD_BAR1LMT_OFFSET = 0x414, + AMD_BAR23LMT_OFFSET = 0x418, + AMD_BAR45LMT_OFFSET = 0x420, + /* xlat address */ + AMD_POMBARXLAT_OFFSET = 0x428, + AMD_BAR1XLAT_OFFSET = 0x430, + AMD_BAR23XLAT_OFFSET = 0x438, + AMD_BAR45XLAT_OFFSET = 0x440, + /* doorbell and interrupt */ + AMD_DBFM_OFFSET = 0x450, + AMD_DBREQ_OFFSET = 0x454, + AMD_MIRRDBSTAT_OFFSET = 0x458, + AMD_DBMASK_OFFSET = 0x45C, + AMD_DBSTAT_OFFSET = 0x460, + AMD_INTMASK_OFFSET = 0x470, + AMD_INTSTAT_OFFSET = 0x474, + + /* event type */ + AMD_PEER_FLUSH_EVENT = BIT(0), + AMD_PEER_RESET_EVENT = BIT(1), + AMD_PEER_D3_EVENT = BIT(2), + AMD_PEER_PMETO_EVENT = BIT(3), + AMD_PEER_D0_EVENT = BIT(4), + AMD_LINK_UP_EVENT = BIT(5), + AMD_LINK_DOWN_EVENT = BIT(6), + AMD_EVENT_INTMASK = (AMD_PEER_FLUSH_EVENT | + AMD_PEER_RESET_EVENT | AMD_PEER_D3_EVENT | + AMD_PEER_PMETO_EVENT | AMD_PEER_D0_EVENT | + 
AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT), + + AMD_PMESTAT_OFFSET = 0x480, + AMD_PMSGTRIG_OFFSET = 0x490, + AMD_LTRLATENCY_OFFSET = 0x494, + AMD_FLUSHTRIG_OFFSET = 0x498, + + /* SMU register*/ + AMD_SMUACK_OFFSET = 0x4A0, + AMD_SINRST_OFFSET = 0x4A4, + AMD_RSPNUM_OFFSET = 0x4A8, + AMD_SMU_SPADMUTEX = 0x4B0, + AMD_SMU_SPADOFFSET = 0x4B4, + + AMD_PEER_OFFSET = 0x400, +}; + +struct ntb_dev_data { + const unsigned char mw_count; + const unsigned int mw_idx; +}; + +struct amd_ntb_dev; + +struct amd_ntb_vec { + struct amd_ntb_dev *ndev; + int num; +}; + +struct amd_ntb_dev { + struct ntb_dev ntb; + + u32 ntb_side; + u32 lnk_sta; + u32 cntl_sta; + u32 peer_sta; + + struct ntb_dev_data *dev_data; + unsigned char mw_count; + unsigned char spad_count; + unsigned char db_count; + unsigned char msix_vec_count; + + u64 db_valid_mask; + u64 db_mask; + u64 db_last_bit; + u32 int_mask; + + struct msix_entry *msix; + struct amd_ntb_vec *vec; + + /* synchronize rmw access of db_mask and hw reg */ + spinlock_t db_mask_lock; + + void __iomem *self_mmio; + void __iomem *peer_mmio; + unsigned int self_spad; + unsigned int peer_spad; + + struct delayed_work hb_timer; + + struct dentry *debugfs_dir; + struct dentry *debugfs_info; +}; + +#define ntb_ndev(__ntb) container_of(__ntb, struct amd_ntb_dev, ntb) +#define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work) + +static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer); +static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer); +static int amd_poll_link(struct amd_ntb_dev *ndev); + +#endif diff --git a/drivers/ntb/hw/epf/Kconfig b/drivers/ntb/hw/epf/Kconfig new file mode 100644 index 0000000000..6197d1aab3 --- /dev/null +++ b/drivers/ntb/hw/epf/Kconfig @@ -0,0 +1,6 @@ +config NTB_EPF + tristate "Generic EPF Non-Transparent Bridge support" + depends on m + help + This driver supports EPF NTB on configurable endpoint. + If unsure, say N. 
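
A note on the read64()/write64() fallbacks defined in ntb_hw_amd.h above:
where no native 64-bit MMIO accessor (readq/writeq) exists, a 64-bit register
is accessed as two 32-bit halves, low word first. A standalone illustration
over an ordinary array (real MMIO would use readl/writel; the two halves are
not atomic as a pair, and note that amd_ntb_mw_set_trans() above reads back
and verifies each translation write regardless):

#include <stdint.h>
#include <stdio.h>

static uint64_t read64_split(const volatile uint32_t *mmio)
{
	uint64_t low = mmio[0];     /* low dword at offset 0 */
	uint64_t high = mmio[1];    /* high dword at offset 4 */

	return low | (high << 32);
}

static void write64_split(uint64_t val, volatile uint32_t *mmio)
{
	mmio[0] = (uint32_t)val;
	mmio[1] = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t reg[2] = { 0, 0 }; /* pretend BAR23 XLAT register */

	write64_split(0x123456789abcdef0ULL, reg);
	printf("%#llx\n", (unsigned long long)read64_split(reg));
	return 0;
}
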
diff --git a/drivers/ntb/hw/epf/Makefile b/drivers/ntb/hw/epf/Makefile new file mode 100644 index 0000000000..2f560a422b --- /dev/null +++ b/drivers/ntb/hw/epf/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NTB_EPF) += ntb_hw_epf.o diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c new file mode 100644 index 0000000000..b640aa0bf4 --- /dev/null +++ b/drivers/ntb/hw/epf/ntb_hw_epf.c @@ -0,0 +1,771 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Host side endpoint driver to implement Non-Transparent Bridge functionality + * + * Copyright (C) 2020 Texas Instruments + * Author: Kishon Vijay Abraham I + */ + +#include +#include +#include +#include +#include + +#define NTB_EPF_COMMAND 0x0 +#define CMD_CONFIGURE_DOORBELL 1 +#define CMD_TEARDOWN_DOORBELL 2 +#define CMD_CONFIGURE_MW 3 +#define CMD_TEARDOWN_MW 4 +#define CMD_LINK_UP 5 +#define CMD_LINK_DOWN 6 + +#define NTB_EPF_ARGUMENT 0x4 +#define MSIX_ENABLE BIT(16) + +#define NTB_EPF_CMD_STATUS 0x8 +#define COMMAND_STATUS_OK 1 +#define COMMAND_STATUS_ERROR 2 + +#define NTB_EPF_LINK_STATUS 0x0A +#define LINK_STATUS_UP BIT(0) + +#define NTB_EPF_TOPOLOGY 0x0C +#define NTB_EPF_LOWER_ADDR 0x10 +#define NTB_EPF_UPPER_ADDR 0x14 +#define NTB_EPF_LOWER_SIZE 0x18 +#define NTB_EPF_UPPER_SIZE 0x1C +#define NTB_EPF_MW_COUNT 0x20 +#define NTB_EPF_MW1_OFFSET 0x24 +#define NTB_EPF_SPAD_OFFSET 0x28 +#define NTB_EPF_SPAD_COUNT 0x2C +#define NTB_EPF_DB_ENTRY_SIZE 0x30 +#define NTB_EPF_DB_DATA(n) (0x34 + (n) * 4) +#define NTB_EPF_DB_OFFSET(n) (0xB4 + (n) * 4) + +#define NTB_EPF_MIN_DB_COUNT 3 +#define NTB_EPF_MAX_DB_COUNT 31 + +#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */ + +enum pci_barno { + BAR_0, + BAR_1, + BAR_2, + BAR_3, + BAR_4, + BAR_5, +}; + +struct ntb_epf_dev { + struct ntb_dev ntb; + struct device *dev; + /* Mutex to protect providing commands to NTB EPF */ + struct mutex cmd_lock; + + enum pci_barno ctrl_reg_bar; + enum pci_barno peer_spad_reg_bar; + enum pci_barno db_reg_bar; + enum pci_barno mw_bar; + + unsigned int mw_count; + unsigned int spad_count; + unsigned int db_count; + + void __iomem *ctrl_reg; + void __iomem *db_reg; + void __iomem *peer_spad_reg; + + unsigned int self_spad; + unsigned int peer_spad; + + int db_val; + u64 db_valid_mask; +}; + +#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb) + +struct ntb_epf_data { + /* BAR that contains both control region and self spad region */ + enum pci_barno ctrl_reg_bar; + /* BAR that contains peer spad region */ + enum pci_barno peer_spad_reg_bar; + /* BAR that contains Doorbell region and Memory window '1' */ + enum pci_barno db_reg_bar; + /* BAR that contains memory windows*/ + enum pci_barno mw_bar; +}; + +static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command, + u32 argument) +{ + ktime_t timeout; + bool timedout; + int ret = 0; + u32 status; + + mutex_lock(&ndev->cmd_lock); + writel(argument, ndev->ctrl_reg + NTB_EPF_ARGUMENT); + writel(command, ndev->ctrl_reg + NTB_EPF_COMMAND); + + timeout = ktime_add_ms(ktime_get(), NTB_EPF_COMMAND_TIMEOUT); + while (1) { + timedout = ktime_after(ktime_get(), timeout); + status = readw(ndev->ctrl_reg + NTB_EPF_CMD_STATUS); + + if (status == COMMAND_STATUS_ERROR) { + ret = -EINVAL; + break; + } + + if (status == COMMAND_STATUS_OK) + break; + + if (WARN_ON(timedout)) { + ret = -ETIMEDOUT; + break; + } + + usleep_range(5, 10); + } + + writew(0, ndev->ctrl_reg + NTB_EPF_CMD_STATUS); + mutex_unlock(&ndev->cmd_lock); + + return ret; +} + +static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx) +{ 
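	/*
	 * Illustrative aside on ntb_epf_send_command() above: the
	 * host/endpoint mailbox is a "write argument, write command, poll
	 * status" protocol, serialized by cmd_lock and bounded by a 1 s
	 * timeout. A condensed model, with plain pointers and a spin budget
	 * standing in for the driver's ctrl_reg accessors and ktime-based
	 * timeout:
	 *
	 *	enum { STS_OK = 1, STS_ERROR = 2 };	// COMMAND_STATUS_*
	 *
	 *	int send_command(volatile uint32_t *arg_reg,
	 *			 volatile uint32_t *cmd_reg,
	 *			 volatile uint16_t *sts_reg,
	 *			 uint32_t cmd, uint32_t arg)
	 *	{
	 *		long budget = 1000000;	// stand-in for the 1 s budget
	 *		int ret = -2;		// timeout unless told otherwise
	 *
	 *		*arg_reg = arg;		// argument first,
	 *		*cmd_reg = cmd;		// the command write triggers the EP
	 *		while (budget-- > 0) {
	 *			uint16_t sts = *sts_reg;
	 *
	 *			if (sts == STS_ERROR) { ret = -1; break; }
	 *			if (sts == STS_OK)    { ret = 0;  break; }
	 *		}
	 *		*sts_reg = 0;		// clear status for the next command
	 *		return ret;
	 *	}
	 */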
+ struct device *dev = ndev->dev; + + if (idx < 0 || idx > ndev->mw_count) { + dev_err(dev, "Unsupported Memory Window index %d\n", idx); + return -EINVAL; + } + + return idx + 2; +} + +static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + + if (pidx != NTB_DEF_PEER_IDX) { + dev_err(dev, "Unsupported Peer ID %d\n", pidx); + return -EINVAL; + } + + return ndev->mw_count; +} + +static int ntb_epf_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) { + dev_err(dev, "Unsupported Peer ID %d\n", pidx); + return -EINVAL; + } + + bar = ntb_epf_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (addr_align) + *addr_align = SZ_4K; + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = pci_resource_len(ndev->ntb.pdev, bar); + + return 0; +} + +static u64 ntb_epf_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + u32 status; + + status = readw(ndev->ctrl_reg + NTB_EPF_LINK_STATUS); + + return status & LINK_STATUS_UP; +} + +static u32 ntb_epf_spad_read(struct ntb_dev *ntb, int idx) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + u32 offset; + + if (idx < 0 || idx >= ndev->spad_count) { + dev_err(dev, "READ: Invalid ScratchPad Index %d\n", idx); + return 0; + } + + offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET); + offset += (idx << 2); + + return readl(ndev->ctrl_reg + offset); +} + +static int ntb_epf_spad_write(struct ntb_dev *ntb, + int idx, u32 val) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + u32 offset; + + if (idx < 0 || idx >= ndev->spad_count) { + dev_err(dev, "WRITE: Invalid ScratchPad Index %d\n", idx); + return -EINVAL; + } + + offset = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET); + offset += (idx << 2); + writel(val, ndev->ctrl_reg + offset); + + return 0; +} + +static u32 ntb_epf_peer_spad_read(struct ntb_dev *ntb, int pidx, int idx) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + u32 offset; + + if (pidx != NTB_DEF_PEER_IDX) { + dev_err(dev, "Unsupported Peer ID %d\n", pidx); + return -EINVAL; + } + + if (idx < 0 || idx >= ndev->spad_count) { + dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx); + return -EINVAL; + } + + offset = (idx << 2); + return readl(ndev->peer_spad_reg + offset); +} + +static int ntb_epf_peer_spad_write(struct ntb_dev *ntb, int pidx, + int idx, u32 val) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + u32 offset; + + if (pidx != NTB_DEF_PEER_IDX) { + dev_err(dev, "Unsupported Peer ID %d\n", pidx); + return -EINVAL; + } + + if (idx < 0 || idx >= ndev->spad_count) { + dev_err(dev, "WRITE: Invalid Peer ScratchPad Index %d\n", idx); + return -EINVAL; + } + + offset = (idx << 2); + writel(val, ndev->peer_spad_reg + offset); + + return 0; +} + +static int ntb_epf_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + int ret; + + ret = ntb_epf_send_command(ndev, CMD_LINK_UP, 0); + if (ret) { + dev_err(dev, "Fail to enable link\n"); + return ret; + } + + return 0; +} + +static int 
ntb_epf_link_disable(struct ntb_dev *ntb) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + int ret; + + ret = ntb_epf_send_command(ndev, CMD_LINK_DOWN, 0); + if (ret) { + dev_err(dev, "Fail to disable link\n"); + return ret; + } + + return 0; +} + +static irqreturn_t ntb_epf_vec_isr(int irq, void *dev) +{ + struct ntb_epf_dev *ndev = dev; + int irq_no; + + irq_no = irq - pci_irq_vector(ndev->ntb.pdev, 0); + ndev->db_val = irq_no + 1; + + if (irq_no == 0) + ntb_link_event(&ndev->ntb); + else + ntb_db_event(&ndev->ntb, irq_no); + + return IRQ_HANDLED; +} + +static int ntb_epf_init_isr(struct ntb_epf_dev *ndev, int msi_min, int msi_max) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + struct device *dev = ndev->dev; + u32 argument = MSIX_ENABLE; + int irq; + int ret; + int i; + + irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, PCI_IRQ_MSIX); + if (irq < 0) { + dev_dbg(dev, "Failed to get MSIX interrupts\n"); + irq = pci_alloc_irq_vectors(pdev, msi_min, msi_max, + PCI_IRQ_MSI); + if (irq < 0) { + dev_err(dev, "Failed to get MSI interrupts\n"); + return irq; + } + argument &= ~MSIX_ENABLE; + } + + for (i = 0; i < irq; i++) { + ret = request_irq(pci_irq_vector(pdev, i), ntb_epf_vec_isr, + 0, "ntb_epf", ndev); + if (ret) { + dev_err(dev, "Failed to request irq\n"); + goto err_request_irq; + } + } + + ndev->db_count = irq - 1; + + ret = ntb_epf_send_command(ndev, CMD_CONFIGURE_DOORBELL, + argument | irq); + if (ret) { + dev_err(dev, "Failed to configure doorbell\n"); + goto err_configure_db; + } + + return 0; + +err_configure_db: + for (i = 0; i < ndev->db_count + 1; i++) + free_irq(pci_irq_vector(pdev, i), ndev); + +err_request_irq: + pci_free_irq_vectors(pdev); + + return ret; +} + +static int ntb_epf_peer_mw_count(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->mw_count; +} + +static int ntb_epf_spad_count(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->spad_count; +} + +static u64 ntb_epf_db_valid_mask(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->db_valid_mask; +} + +static int ntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + return 0; +} + +static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + resource_size_t mw_size; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) { + dev_err(dev, "Unsupported Peer ID %d\n", pidx); + return -EINVAL; + } + + bar = idx + ndev->mw_bar; + + mw_size = pci_resource_len(ntb->pdev, bar); + + if (size > mw_size) { + dev_err(dev, "Size:%pa is greater than the MW size %pa\n", + &size, &mw_size); + return -EINVAL; + } + + writel(lower_32_bits(addr), ndev->ctrl_reg + NTB_EPF_LOWER_ADDR); + writel(upper_32_bits(addr), ndev->ctrl_reg + NTB_EPF_UPPER_ADDR); + writel(lower_32_bits(size), ndev->ctrl_reg + NTB_EPF_LOWER_SIZE); + writel(upper_32_bits(size), ndev->ctrl_reg + NTB_EPF_UPPER_SIZE); + ntb_epf_send_command(ndev, CMD_CONFIGURE_MW, idx); + + return 0; +} + +static int ntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + struct device *dev = ndev->dev; + int ret = 0; + + ntb_epf_send_command(ndev, CMD_TEARDOWN_MW, idx); + if (ret) + dev_err(dev, "Failed to teardown memory window\n"); + + return ret; +} + +static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + u32 offset = 0; + int bar; + + if (idx == 0) + offset = 
readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET); + + bar = idx + ndev->mw_bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar) + offset; + + if (size) + *size = pci_resource_len(ndev->ntb.pdev, bar) - offset; + + return 0; +} + +static int ntb_epf_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + u32 interrupt_num = ffs(db_bits) + 1; + struct device *dev = ndev->dev; + u32 db_entry_size; + u32 db_offset; + u32 db_data; + + if (interrupt_num > ndev->db_count) { + dev_err(dev, "DB interrupt %d greater than Max Supported %d\n", + interrupt_num, ndev->db_count); + return -EINVAL; + } + + db_entry_size = readl(ndev->ctrl_reg + NTB_EPF_DB_ENTRY_SIZE); + + db_data = readl(ndev->ctrl_reg + NTB_EPF_DB_DATA(interrupt_num)); + db_offset = readl(ndev->ctrl_reg + NTB_EPF_DB_OFFSET(interrupt_num)); + writel(db_data, ndev->db_reg + (db_entry_size * interrupt_num) + + db_offset); + + return 0; +} + +static u64 ntb_epf_db_read(struct ntb_dev *ntb) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + + return ndev->db_val; +} + +static int ntb_epf_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + return 0; +} + +static int ntb_epf_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct ntb_epf_dev *ndev = ntb_ndev(ntb); + + ndev->db_val = 0; + + return 0; +} + +static const struct ntb_dev_ops ntb_epf_ops = { + .mw_count = ntb_epf_mw_count, + .spad_count = ntb_epf_spad_count, + .peer_mw_count = ntb_epf_peer_mw_count, + .db_valid_mask = ntb_epf_db_valid_mask, + .db_set_mask = ntb_epf_db_set_mask, + .mw_set_trans = ntb_epf_mw_set_trans, + .mw_clear_trans = ntb_epf_mw_clear_trans, + .peer_mw_get_addr = ntb_epf_peer_mw_get_addr, + .link_enable = ntb_epf_link_enable, + .spad_read = ntb_epf_spad_read, + .spad_write = ntb_epf_spad_write, + .peer_spad_read = ntb_epf_peer_spad_read, + .peer_spad_write = ntb_epf_peer_spad_write, + .peer_db_set = ntb_epf_peer_db_set, + .db_read = ntb_epf_db_read, + .mw_get_align = ntb_epf_mw_get_align, + .link_is_up = ntb_epf_link_is_up, + .db_clear_mask = ntb_epf_db_clear_mask, + .db_clear = ntb_epf_db_clear, + .link_disable = ntb_epf_link_disable, +}; + +static inline void ntb_epf_init_struct(struct ntb_epf_dev *ndev, + struct pci_dev *pdev) +{ + ndev->ntb.pdev = pdev; + ndev->ntb.topo = NTB_TOPO_NONE; + ndev->ntb.ops = &ntb_epf_ops; +} + +static int ntb_epf_init_dev(struct ntb_epf_dev *ndev) +{ + struct device *dev = ndev->dev; + int ret; + + /* One Link interrupt and rest doorbell interrupt */ + ret = ntb_epf_init_isr(ndev, NTB_EPF_MIN_DB_COUNT + 1, + NTB_EPF_MAX_DB_COUNT + 1); + if (ret) { + dev_err(dev, "Failed to init ISR\n"); + return ret; + } + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT); + ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT); + + return 0; +} + +static int ntb_epf_init_pci(struct ntb_epf_dev *ndev, + struct pci_dev *pdev) +{ + struct device *dev = ndev->dev; + size_t spad_sz, spad_off; + int ret; + + pci_set_drvdata(pdev, ndev); + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(dev, "Cannot enable PCI device\n"); + goto err_pci_enable; + } + + ret = pci_request_regions(pdev, "ntb"); + if (ret) { + dev_err(dev, "Cannot obtain PCI resources\n"); + goto err_pci_regions; + } + + pci_set_master(pdev); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "Cannot set DMA mask\n"); + goto err_pci_regions; + } + 
dev_warn(&pdev->dev, "Cannot DMA highmem\n"); + } + + ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0); + if (!ndev->ctrl_reg) { + ret = -EIO; + goto err_pci_regions; + } + + if (ndev->peer_spad_reg_bar) { + ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0); + if (!ndev->peer_spad_reg) { + ret = -EIO; + goto err_pci_regions; + } + } else { + spad_sz = 4 * readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT); + spad_off = readl(ndev->ctrl_reg + NTB_EPF_SPAD_OFFSET); + ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz; + } + + ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0); + if (!ndev->db_reg) { + ret = -EIO; + goto err_pci_regions; + } + + return 0; + +err_pci_regions: + pci_disable_device(pdev); + +err_pci_enable: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +static void ntb_epf_deinit_pci(struct ntb_epf_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + + pci_iounmap(pdev, ndev->ctrl_reg); + pci_iounmap(pdev, ndev->peer_spad_reg); + pci_iounmap(pdev, ndev->db_reg); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + int i; + + ntb_epf_send_command(ndev, CMD_TEARDOWN_DOORBELL, ndev->db_count + 1); + + for (i = 0; i < ndev->db_count + 1; i++) + free_irq(pci_irq_vector(pdev, i), ndev); + pci_free_irq_vectors(pdev); +} + +static int ntb_epf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + enum pci_barno peer_spad_reg_bar = BAR_1; + enum pci_barno ctrl_reg_bar = BAR_0; + enum pci_barno db_reg_bar = BAR_2; + enum pci_barno mw_bar = BAR_2; + struct device *dev = &pdev->dev; + struct ntb_epf_data *data; + struct ntb_epf_dev *ndev; + int ret; + + if (pci_is_bridge(pdev)) + return -ENODEV; + + ndev = devm_kzalloc(dev, sizeof(*ndev), GFP_KERNEL); + if (!ndev) + return -ENOMEM; + + data = (struct ntb_epf_data *)id->driver_data; + if (data) { + peer_spad_reg_bar = data->peer_spad_reg_bar; + ctrl_reg_bar = data->ctrl_reg_bar; + db_reg_bar = data->db_reg_bar; + mw_bar = data->mw_bar; + } + + ndev->peer_spad_reg_bar = peer_spad_reg_bar; + ndev->ctrl_reg_bar = ctrl_reg_bar; + ndev->db_reg_bar = db_reg_bar; + ndev->mw_bar = mw_bar; + ndev->dev = dev; + + ntb_epf_init_struct(ndev, pdev); + mutex_init(&ndev->cmd_lock); + + ret = ntb_epf_init_pci(ndev, pdev); + if (ret) { + dev_err(dev, "Failed to init PCI\n"); + return ret; + } + + ret = ntb_epf_init_dev(ndev); + if (ret) { + dev_err(dev, "Failed to init device\n"); + goto err_init_dev; + } + + ret = ntb_register_device(&ndev->ntb); + if (ret) { + dev_err(dev, "Failed to register NTB device\n"); + goto err_register_dev; + } + + return 0; + +err_register_dev: + ntb_epf_cleanup_isr(ndev); + +err_init_dev: + ntb_epf_deinit_pci(ndev); + + return ret; +} + +static void ntb_epf_pci_remove(struct pci_dev *pdev) +{ + struct ntb_epf_dev *ndev = pci_get_drvdata(pdev); + + ntb_unregister_device(&ndev->ntb); + ntb_epf_cleanup_isr(ndev); + ntb_epf_deinit_pci(ndev); +} + +static const struct ntb_epf_data j721e_data = { + .ctrl_reg_bar = BAR_0, + .peer_spad_reg_bar = BAR_1, + .db_reg_bar = BAR_2, + .mw_bar = BAR_2, +}; + +static const struct ntb_epf_data mx8_data = { + .ctrl_reg_bar = BAR_0, + .peer_spad_reg_bar = BAR_0, + .db_reg_bar = BAR_2, + .mw_bar = BAR_4, +}; + +static const struct pci_device_id ntb_epf_pci_tbl[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), + .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, + .driver_data = 
(kernel_ulong_t)&j721e_data, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809), + .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00, + .driver_data = (kernel_ulong_t)&mx8_data, + }, + { }, +}; + +static struct pci_driver ntb_epf_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = ntb_epf_pci_tbl, + .probe = ntb_epf_pci_probe, + .remove = ntb_epf_pci_remove, +}; +module_pci_driver(ntb_epf_pci_driver); + +MODULE_DESCRIPTION("PCI ENDPOINT NTB HOST DRIVER"); +MODULE_AUTHOR("Kishon Vijay Abraham I "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/ntb/hw/idt/Kconfig b/drivers/ntb/hw/idt/Kconfig new file mode 100644 index 0000000000..c79b54c174 --- /dev/null +++ b/drivers/ntb/hw/idt/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NTB_IDT + tristate "IDT PCIe-switch Non-Transparent Bridge support" + depends on PCI + select HWMON + help + This driver supports NTB of capable IDT PCIe-switches. + + Some of the pre-initializations must be made before IDT PCIe-switch + exposes its NT-functions correctly. It should be done by either proper + initialization of EEPROM connected to master SMbus of the switch or + by BIOS using slave-SMBus interface changing corresponding registers + value. Evidently it must be done before PCI bus enumeration is + finished in Linux kernel. + + First of all partitions must be activated and properly assigned to all + the ports with NT-functions intended to be activated (see SWPARTxCTL + and SWPORTxCTL registers). Then all NT-function BARs must be enabled + with chosen valid aperture. For memory windows related BARs the + aperture settings shall determine the maximum size of memory windows + accepted by a BAR. Note that BAR0 must map PCI configuration space + registers. + + It's worth to note, that since a part of this driver relies on the + BAR settings of peer NT-functions, the BAR setups can't be done over + kernel PCI fixups. That's why the alternative pre-initialization + techniques like BIOS using SMBus interface or EEPROM should be + utilized. + + If unsure, say N. + diff --git a/drivers/ntb/hw/idt/Makefile b/drivers/ntb/hw/idt/Makefile new file mode 100644 index 0000000000..f75e9d65c8 --- /dev/null +++ b/drivers/ntb/hw/idt/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NTB_IDT) += ntb_hw_idt.o diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c new file mode 100644 index 0000000000..48823b53ed --- /dev/null +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -0,0 +1,2912 @@ +/* + * This file is provided under a GPLv2 license. When using or + * redistributing this file, you may do so under that license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, one can be found http://www.gnu.org/licenses/. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * IDT PCIe-switch NTB Linux driver + * + * Contact Information: + * Serge Semin , + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ntb_hw_idt.h" + +#define NTB_NAME "ntb_hw_idt" +#define NTB_DESC "IDT PCI-E Non-Transparent Bridge Driver" +#define NTB_VER "2.0" +#define NTB_IRQNAME "ntb_irq_idt" + +MODULE_DESCRIPTION(NTB_DESC); +MODULE_VERSION(NTB_VER); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("T-platforms"); + +/* + * NT Endpoint registers table simplifying a loop access to the functionally + * related registers + */ +static const struct idt_ntb_regs ntdata_tbl = { + { {IDT_NT_BARSETUP0, IDT_NT_BARLIMIT0, + IDT_NT_BARLTBASE0, IDT_NT_BARUTBASE0}, + {IDT_NT_BARSETUP1, IDT_NT_BARLIMIT1, + IDT_NT_BARLTBASE1, IDT_NT_BARUTBASE1}, + {IDT_NT_BARSETUP2, IDT_NT_BARLIMIT2, + IDT_NT_BARLTBASE2, IDT_NT_BARUTBASE2}, + {IDT_NT_BARSETUP3, IDT_NT_BARLIMIT3, + IDT_NT_BARLTBASE3, IDT_NT_BARUTBASE3}, + {IDT_NT_BARSETUP4, IDT_NT_BARLIMIT4, + IDT_NT_BARLTBASE4, IDT_NT_BARUTBASE4}, + {IDT_NT_BARSETUP5, IDT_NT_BARLIMIT5, + IDT_NT_BARLTBASE5, IDT_NT_BARUTBASE5} }, + { {IDT_NT_INMSG0, IDT_NT_OUTMSG0, IDT_NT_INMSGSRC0}, + {IDT_NT_INMSG1, IDT_NT_OUTMSG1, IDT_NT_INMSGSRC1}, + {IDT_NT_INMSG2, IDT_NT_OUTMSG2, IDT_NT_INMSGSRC2}, + {IDT_NT_INMSG3, IDT_NT_OUTMSG3, IDT_NT_INMSGSRC3} } +}; + +/* + * NT Endpoint ports data table with the corresponding pcie command, link + * status, control and BAR-related registers + */ +static const struct idt_ntb_port portdata_tbl[IDT_MAX_NR_PORTS] = { +/*0*/ { IDT_SW_NTP0_PCIECMDSTS, IDT_SW_NTP0_PCIELCTLSTS, + IDT_SW_NTP0_NTCTL, + IDT_SW_SWPORT0CTL, IDT_SW_SWPORT0STS, + { {IDT_SW_NTP0_BARSETUP0, IDT_SW_NTP0_BARLIMIT0, + IDT_SW_NTP0_BARLTBASE0, IDT_SW_NTP0_BARUTBASE0}, + {IDT_SW_NTP0_BARSETUP1, IDT_SW_NTP0_BARLIMIT1, + IDT_SW_NTP0_BARLTBASE1, IDT_SW_NTP0_BARUTBASE1}, + {IDT_SW_NTP0_BARSETUP2, IDT_SW_NTP0_BARLIMIT2, + IDT_SW_NTP0_BARLTBASE2, IDT_SW_NTP0_BARUTBASE2}, + {IDT_SW_NTP0_BARSETUP3, IDT_SW_NTP0_BARLIMIT3, + IDT_SW_NTP0_BARLTBASE3, IDT_SW_NTP0_BARUTBASE3}, + {IDT_SW_NTP0_BARSETUP4, IDT_SW_NTP0_BARLIMIT4, + IDT_SW_NTP0_BARLTBASE4, IDT_SW_NTP0_BARUTBASE4}, + {IDT_SW_NTP0_BARSETUP5, IDT_SW_NTP0_BARLIMIT5, + IDT_SW_NTP0_BARLTBASE5, IDT_SW_NTP0_BARUTBASE5} } }, +/*1*/ {0}, +/*2*/ { IDT_SW_NTP2_PCIECMDSTS, IDT_SW_NTP2_PCIELCTLSTS, + IDT_SW_NTP2_NTCTL, + IDT_SW_SWPORT2CTL, IDT_SW_SWPORT2STS, + { {IDT_SW_NTP2_BARSETUP0, IDT_SW_NTP2_BARLIMIT0, + IDT_SW_NTP2_BARLTBASE0, IDT_SW_NTP2_BARUTBASE0}, + {IDT_SW_NTP2_BARSETUP1, IDT_SW_NTP2_BARLIMIT1, + IDT_SW_NTP2_BARLTBASE1, IDT_SW_NTP2_BARUTBASE1}, + {IDT_SW_NTP2_BARSETUP2, IDT_SW_NTP2_BARLIMIT2, 
+ IDT_SW_NTP2_BARLTBASE2, IDT_SW_NTP2_BARUTBASE2}, + {IDT_SW_NTP2_BARSETUP3, IDT_SW_NTP2_BARLIMIT3, + IDT_SW_NTP2_BARLTBASE3, IDT_SW_NTP2_BARUTBASE3}, + {IDT_SW_NTP2_BARSETUP4, IDT_SW_NTP2_BARLIMIT4, + IDT_SW_NTP2_BARLTBASE4, IDT_SW_NTP2_BARUTBASE4}, + {IDT_SW_NTP2_BARSETUP5, IDT_SW_NTP2_BARLIMIT5, + IDT_SW_NTP2_BARLTBASE5, IDT_SW_NTP2_BARUTBASE5} } }, +/*3*/ {0}, +/*4*/ { IDT_SW_NTP4_PCIECMDSTS, IDT_SW_NTP4_PCIELCTLSTS, + IDT_SW_NTP4_NTCTL, + IDT_SW_SWPORT4CTL, IDT_SW_SWPORT4STS, + { {IDT_SW_NTP4_BARSETUP0, IDT_SW_NTP4_BARLIMIT0, + IDT_SW_NTP4_BARLTBASE0, IDT_SW_NTP4_BARUTBASE0}, + {IDT_SW_NTP4_BARSETUP1, IDT_SW_NTP4_BARLIMIT1, + IDT_SW_NTP4_BARLTBASE1, IDT_SW_NTP4_BARUTBASE1}, + {IDT_SW_NTP4_BARSETUP2, IDT_SW_NTP4_BARLIMIT2, + IDT_SW_NTP4_BARLTBASE2, IDT_SW_NTP4_BARUTBASE2}, + {IDT_SW_NTP4_BARSETUP3, IDT_SW_NTP4_BARLIMIT3, + IDT_SW_NTP4_BARLTBASE3, IDT_SW_NTP4_BARUTBASE3}, + {IDT_SW_NTP4_BARSETUP4, IDT_SW_NTP4_BARLIMIT4, + IDT_SW_NTP4_BARLTBASE4, IDT_SW_NTP4_BARUTBASE4}, + {IDT_SW_NTP4_BARSETUP5, IDT_SW_NTP4_BARLIMIT5, + IDT_SW_NTP4_BARLTBASE5, IDT_SW_NTP4_BARUTBASE5} } }, +/*5*/ {0}, +/*6*/ { IDT_SW_NTP6_PCIECMDSTS, IDT_SW_NTP6_PCIELCTLSTS, + IDT_SW_NTP6_NTCTL, + IDT_SW_SWPORT6CTL, IDT_SW_SWPORT6STS, + { {IDT_SW_NTP6_BARSETUP0, IDT_SW_NTP6_BARLIMIT0, + IDT_SW_NTP6_BARLTBASE0, IDT_SW_NTP6_BARUTBASE0}, + {IDT_SW_NTP6_BARSETUP1, IDT_SW_NTP6_BARLIMIT1, + IDT_SW_NTP6_BARLTBASE1, IDT_SW_NTP6_BARUTBASE1}, + {IDT_SW_NTP6_BARSETUP2, IDT_SW_NTP6_BARLIMIT2, + IDT_SW_NTP6_BARLTBASE2, IDT_SW_NTP6_BARUTBASE2}, + {IDT_SW_NTP6_BARSETUP3, IDT_SW_NTP6_BARLIMIT3, + IDT_SW_NTP6_BARLTBASE3, IDT_SW_NTP6_BARUTBASE3}, + {IDT_SW_NTP6_BARSETUP4, IDT_SW_NTP6_BARLIMIT4, + IDT_SW_NTP6_BARLTBASE4, IDT_SW_NTP6_BARUTBASE4}, + {IDT_SW_NTP6_BARSETUP5, IDT_SW_NTP6_BARLIMIT5, + IDT_SW_NTP6_BARLTBASE5, IDT_SW_NTP6_BARUTBASE5} } }, +/*7*/ {0}, +/*8*/ { IDT_SW_NTP8_PCIECMDSTS, IDT_SW_NTP8_PCIELCTLSTS, + IDT_SW_NTP8_NTCTL, + IDT_SW_SWPORT8CTL, IDT_SW_SWPORT8STS, + { {IDT_SW_NTP8_BARSETUP0, IDT_SW_NTP8_BARLIMIT0, + IDT_SW_NTP8_BARLTBASE0, IDT_SW_NTP8_BARUTBASE0}, + {IDT_SW_NTP8_BARSETUP1, IDT_SW_NTP8_BARLIMIT1, + IDT_SW_NTP8_BARLTBASE1, IDT_SW_NTP8_BARUTBASE1}, + {IDT_SW_NTP8_BARSETUP2, IDT_SW_NTP8_BARLIMIT2, + IDT_SW_NTP8_BARLTBASE2, IDT_SW_NTP8_BARUTBASE2}, + {IDT_SW_NTP8_BARSETUP3, IDT_SW_NTP8_BARLIMIT3, + IDT_SW_NTP8_BARLTBASE3, IDT_SW_NTP8_BARUTBASE3}, + {IDT_SW_NTP8_BARSETUP4, IDT_SW_NTP8_BARLIMIT4, + IDT_SW_NTP8_BARLTBASE4, IDT_SW_NTP8_BARUTBASE4}, + {IDT_SW_NTP8_BARSETUP5, IDT_SW_NTP8_BARLIMIT5, + IDT_SW_NTP8_BARLTBASE5, IDT_SW_NTP8_BARUTBASE5} } }, +/*9*/ {0}, +/*10*/ {0}, +/*11*/ {0}, +/*12*/ { IDT_SW_NTP12_PCIECMDSTS, IDT_SW_NTP12_PCIELCTLSTS, + IDT_SW_NTP12_NTCTL, + IDT_SW_SWPORT12CTL, IDT_SW_SWPORT12STS, + { {IDT_SW_NTP12_BARSETUP0, IDT_SW_NTP12_BARLIMIT0, + IDT_SW_NTP12_BARLTBASE0, IDT_SW_NTP12_BARUTBASE0}, + {IDT_SW_NTP12_BARSETUP1, IDT_SW_NTP12_BARLIMIT1, + IDT_SW_NTP12_BARLTBASE1, IDT_SW_NTP12_BARUTBASE1}, + {IDT_SW_NTP12_BARSETUP2, IDT_SW_NTP12_BARLIMIT2, + IDT_SW_NTP12_BARLTBASE2, IDT_SW_NTP12_BARUTBASE2}, + {IDT_SW_NTP12_BARSETUP3, IDT_SW_NTP12_BARLIMIT3, + IDT_SW_NTP12_BARLTBASE3, IDT_SW_NTP12_BARUTBASE3}, + {IDT_SW_NTP12_BARSETUP4, IDT_SW_NTP12_BARLIMIT4, + IDT_SW_NTP12_BARLTBASE4, IDT_SW_NTP12_BARUTBASE4}, + {IDT_SW_NTP12_BARSETUP5, IDT_SW_NTP12_BARLIMIT5, + IDT_SW_NTP12_BARLTBASE5, IDT_SW_NTP12_BARUTBASE5} } }, +/*13*/ {0}, +/*14*/ {0}, +/*15*/ {0}, +/*16*/ { IDT_SW_NTP16_PCIECMDSTS, IDT_SW_NTP16_PCIELCTLSTS, + IDT_SW_NTP16_NTCTL, + IDT_SW_SWPORT16CTL, IDT_SW_SWPORT16STS, + { {IDT_SW_NTP16_BARSETUP0, 
IDT_SW_NTP16_BARLIMIT0, + IDT_SW_NTP16_BARLTBASE0, IDT_SW_NTP16_BARUTBASE0}, + {IDT_SW_NTP16_BARSETUP1, IDT_SW_NTP16_BARLIMIT1, + IDT_SW_NTP16_BARLTBASE1, IDT_SW_NTP16_BARUTBASE1}, + {IDT_SW_NTP16_BARSETUP2, IDT_SW_NTP16_BARLIMIT2, + IDT_SW_NTP16_BARLTBASE2, IDT_SW_NTP16_BARUTBASE2}, + {IDT_SW_NTP16_BARSETUP3, IDT_SW_NTP16_BARLIMIT3, + IDT_SW_NTP16_BARLTBASE3, IDT_SW_NTP16_BARUTBASE3}, + {IDT_SW_NTP16_BARSETUP4, IDT_SW_NTP16_BARLIMIT4, + IDT_SW_NTP16_BARLTBASE4, IDT_SW_NTP16_BARUTBASE4}, + {IDT_SW_NTP16_BARSETUP5, IDT_SW_NTP16_BARLIMIT5, + IDT_SW_NTP16_BARLTBASE5, IDT_SW_NTP16_BARUTBASE5} } }, +/*17*/ {0}, +/*18*/ {0}, +/*19*/ {0}, +/*20*/ { IDT_SW_NTP20_PCIECMDSTS, IDT_SW_NTP20_PCIELCTLSTS, + IDT_SW_NTP20_NTCTL, + IDT_SW_SWPORT20CTL, IDT_SW_SWPORT20STS, + { {IDT_SW_NTP20_BARSETUP0, IDT_SW_NTP20_BARLIMIT0, + IDT_SW_NTP20_BARLTBASE0, IDT_SW_NTP20_BARUTBASE0}, + {IDT_SW_NTP20_BARSETUP1, IDT_SW_NTP20_BARLIMIT1, + IDT_SW_NTP20_BARLTBASE1, IDT_SW_NTP20_BARUTBASE1}, + {IDT_SW_NTP20_BARSETUP2, IDT_SW_NTP20_BARLIMIT2, + IDT_SW_NTP20_BARLTBASE2, IDT_SW_NTP20_BARUTBASE2}, + {IDT_SW_NTP20_BARSETUP3, IDT_SW_NTP20_BARLIMIT3, + IDT_SW_NTP20_BARLTBASE3, IDT_SW_NTP20_BARUTBASE3}, + {IDT_SW_NTP20_BARSETUP4, IDT_SW_NTP20_BARLIMIT4, + IDT_SW_NTP20_BARLTBASE4, IDT_SW_NTP20_BARUTBASE4}, + {IDT_SW_NTP20_BARSETUP5, IDT_SW_NTP20_BARLIMIT5, + IDT_SW_NTP20_BARLTBASE5, IDT_SW_NTP20_BARUTBASE5} } }, +/*21*/ {0}, +/*22*/ {0}, +/*23*/ {0} +}; + +/* + * IDT PCIe-switch partitions table with the corresponding control, status + * and messages control registers + */ +static const struct idt_ntb_part partdata_tbl[IDT_MAX_NR_PARTS] = { +/*0*/ { IDT_SW_SWPART0CTL, IDT_SW_SWPART0STS, + {IDT_SW_SWP0MSGCTL0, IDT_SW_SWP0MSGCTL1, + IDT_SW_SWP0MSGCTL2, IDT_SW_SWP0MSGCTL3} }, +/*1*/ { IDT_SW_SWPART1CTL, IDT_SW_SWPART1STS, + {IDT_SW_SWP1MSGCTL0, IDT_SW_SWP1MSGCTL1, + IDT_SW_SWP1MSGCTL2, IDT_SW_SWP1MSGCTL3} }, +/*2*/ { IDT_SW_SWPART2CTL, IDT_SW_SWPART2STS, + {IDT_SW_SWP2MSGCTL0, IDT_SW_SWP2MSGCTL1, + IDT_SW_SWP2MSGCTL2, IDT_SW_SWP2MSGCTL3} }, +/*3*/ { IDT_SW_SWPART3CTL, IDT_SW_SWPART3STS, + {IDT_SW_SWP3MSGCTL0, IDT_SW_SWP3MSGCTL1, + IDT_SW_SWP3MSGCTL2, IDT_SW_SWP3MSGCTL3} }, +/*4*/ { IDT_SW_SWPART4CTL, IDT_SW_SWPART4STS, + {IDT_SW_SWP4MSGCTL0, IDT_SW_SWP4MSGCTL1, + IDT_SW_SWP4MSGCTL2, IDT_SW_SWP4MSGCTL3} }, +/*5*/ { IDT_SW_SWPART5CTL, IDT_SW_SWPART5STS, + {IDT_SW_SWP5MSGCTL0, IDT_SW_SWP5MSGCTL1, + IDT_SW_SWP5MSGCTL2, IDT_SW_SWP5MSGCTL3} }, +/*6*/ { IDT_SW_SWPART6CTL, IDT_SW_SWPART6STS, + {IDT_SW_SWP6MSGCTL0, IDT_SW_SWP6MSGCTL1, + IDT_SW_SWP6MSGCTL2, IDT_SW_SWP6MSGCTL3} }, +/*7*/ { IDT_SW_SWPART7CTL, IDT_SW_SWPART7STS, + {IDT_SW_SWP7MSGCTL0, IDT_SW_SWP7MSGCTL1, + IDT_SW_SWP7MSGCTL2, IDT_SW_SWP7MSGCTL3} } +}; + +/* + * DebugFS directory to place the driver debug file + */ +static struct dentry *dbgfs_topdir; + +/*============================================================================= + * 1. IDT PCIe-switch registers IO-functions + * + * Beside ordinary configuration space registers IDT PCIe-switch expose + * global configuration registers, which are used to determine state of other + * device ports as well as being notified of some switch-related events. + * Additionally all the configuration space registers of all the IDT + * PCIe-switch functions are mapped to the Global Address space, so each + * function can determine a configuration of any other PCI-function. 
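+ * For example, the local NT-function could (illustratively) fetch the status
+ * of switch port 2 over the global address space like this (hypothetical
+ * snippet, not a part of the driver code; see idt_sw_read() below):
+ *
+ *	portsts = idt_sw_read(ndev, IDT_SW_SWPORT2STS);
+ *	part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+ *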
+ * Functions declared in this chapter are created to encapsulate access
+ * to configuration and global registers, so the driver code just needs to
+ * provide an IDT NTB hardware descriptor and a register address.
+ *=============================================================================
+ */
+
+/*
+ * idt_nt_write() - PCI configuration space registers write method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to write data to
+ * @data:	Value to write to the register
+ *
+ * IDT PCIe-switch registers are all Little endian.
+ */
+static void idt_nt_write(struct idt_ntb_dev *ndev,
+			 const unsigned int reg, const u32 data)
+{
+	/*
+	 * It's an obvious bug to request a register exceeding the maximum
+	 * possible value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return;
+
+	/* Just write the value to the specified register */
+	iowrite32(data, ndev->cfgspc + (ptrdiff_t)reg);
+}
+
+/*
+ * idt_nt_read() - PCI configuration space registers read method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to read data from
+ *
+ * IDT PCIe-switch registers are all Little endian.
+ *
+ * Return: register value
+ */
+static u32 idt_nt_read(struct idt_ntb_dev *ndev, const unsigned int reg)
+{
+	/*
+	 * It's an obvious bug to request a register exceeding the maximum
+	 * possible value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return ~0;
+
+	/* Just read the value from the specified register */
+	return ioread32(ndev->cfgspc + (ptrdiff_t)reg);
+}
+
+/*
+ * idt_sw_write() - Global registers write method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to write data to
+ * @data:	Value to write to the register
+ *
+ * IDT PCIe-switch Global configuration registers are all Little endian.
+ */
+static void idt_sw_write(struct idt_ntb_dev *ndev,
+			 const unsigned int reg, const u32 data)
+{
+	unsigned long irqflags;
+
+	/*
+	 * It's an obvious bug to request a register exceeding the maximum
+	 * possible value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return;
+
+	/* Lock GASA registers operations */
+	spin_lock_irqsave(&ndev->gasa_lock, irqflags);
+	/* Set the global register address */
+	iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+	/* Put the new value of the register */
+	iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+	/* Unlock GASA registers operations */
+	spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
+}
+
+/*
+ * idt_sw_read() - Global registers read method
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to read data from
+ *
+ * IDT PCIe-switch Global configuration registers are all Little endian.
+ *
+ * Return: register value
+ */
+static u32 idt_sw_read(struct idt_ntb_dev *ndev, const unsigned int reg)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	/*
+	 * It's an obvious bug to request a register exceeding the maximum
+	 * possible value as well as to have it unaligned.
+	 */
+	if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
+		return ~0;
+
+	/* Lock GASA registers operations */
+	spin_lock_irqsave(&ndev->gasa_lock, irqflags);
+	/* Set the global register address */
+	iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
+	/* Get the data of the register (the read op acts as an MMIO barrier) */
+	data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
+	/* Unlock GASA registers operations */
+	spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
+
+	return data;
+}
+
+/*
+ * idt_reg_set_bits() - set bits of a passed register
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to change bits of
+ * @reg_lock:	Register access spin lock
+ * @valid_mask:	Mask of valid bits
+ * @set_bits:	Bitmask to set
+ *
+ * Helper method to check whether a passed bitfield is valid and set
+ * corresponding bits of a register.
+ *
+ * WARNING! Make sure the passed register isn't accessed over the plain
+ * idt_nt_write() method (the read method is ok to be used concurrently).
+ *
+ * Return: zero on success, negative error on invalid bitmask.
+ */
+static inline int idt_reg_set_bits(struct idt_ntb_dev *ndev, unsigned int reg,
+				   spinlock_t *reg_lock,
+				   u64 valid_mask, u64 set_bits)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	if (set_bits & ~(u64)valid_mask)
+		return -EINVAL;
+
+	/* Lock access to the register until the change is written back */
+	spin_lock_irqsave(reg_lock, irqflags);
+	data = idt_nt_read(ndev, reg) | (u32)set_bits;
+	idt_nt_write(ndev, reg, data);
+	/* Unlock the register */
+	spin_unlock_irqrestore(reg_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * idt_reg_clear_bits() - clear bits of a passed register
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @reg:	Register to change bits of
+ * @reg_lock:	Register access spin lock
+ * @clear_bits:	Bitmask to clear
+ *
+ * Helper method to check whether a passed bitfield is valid and clear
+ * corresponding bits of a register.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * WARNING! Make sure the passed register isn't accessed over the plain
+ * idt_nt_write() method (the read method is ok to use concurrently).
+ */
+static inline void idt_reg_clear_bits(struct idt_ntb_dev *ndev,
+				      unsigned int reg, spinlock_t *reg_lock,
+				      u64 clear_bits)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	/* Lock access to the register until the change is written back */
+	spin_lock_irqsave(reg_lock, irqflags);
+	data = idt_nt_read(ndev, reg) & ~(u32)clear_bits;
+	idt_nt_write(ndev, reg, data);
+	/* Unlock the register */
+	spin_unlock_irqrestore(reg_lock, irqflags);
+}
+
+/*===========================================================================
+ * 2. Ports operations
+ *
+ * IDT PCIe-switches can have from 3 up to 8 ports with NT-functions
+ * possibly enabled, so all the possible ports need to be scanned looking
+ * for activated NT-functions. The NTB API will then have enumerated only
+ * the ports with NTB enabled.
+ *===========================================================================
+ */
+
+/*
+ * idt_scan_ports() - scan IDT PCIe-switch ports collecting info in the tables
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
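+ *
+ * For instance (purely illustrative numbers): with the local port being 0
+ * and NT-functions detected on ports 2 and 4, the scan would end up with
+ * peer_cnt == 2, peers[0].port == 2, peers[1].port == 4 and
+ * port_idx_map[4] == 1, while all the other port_idx_map entries keep
+ * their -EINVAL pre-initialization.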
+ */
+static int idt_scan_ports(struct idt_ntb_dev *ndev)
+{
+	unsigned char pidx, port, part;
+	u32 data, portsts, partsts;
+
+	/* Retrieve the local port number */
+	data = idt_nt_read(ndev, IDT_NT_PCIELCAP);
+	ndev->port = GET_FIELD(PCIELCAP_PORTNUM, data);
+
+	/* Retrieve the local partition number */
+	portsts = idt_sw_read(ndev, portdata_tbl[ndev->port].sts);
+	ndev->part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+
+	/* Initialize port/partition -> index tables with invalid values */
+	memset(ndev->port_idx_map, -EINVAL, sizeof(ndev->port_idx_map));
+	memset(ndev->part_idx_map, -EINVAL, sizeof(ndev->part_idx_map));
+
+	/*
+	 * Walk over all the possible ports checking whether any of them has
+	 * an NT-function activated
+	 */
+	ndev->peer_cnt = 0;
+	for (pidx = 0; pidx < ndev->swcfg->port_cnt; pidx++) {
+		port = ndev->swcfg->ports[pidx];
+		/* Skip local port */
+		if (port == ndev->port)
+			continue;
+
+		/* Read the port status register to get its partition */
+		portsts = idt_sw_read(ndev, portdata_tbl[port].sts);
+		part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
+
+		/* Retrieve the partition status */
+		partsts = idt_sw_read(ndev, partdata_tbl[part].sts);
+		/* Check if partition state is active and port has NTB */
+		if (IS_FLD_SET(SWPARTxSTS_STATE, partsts, ACT) &&
+		    (IS_FLD_SET(SWPORTxSTS_MODE, portsts, NT) ||
+		     IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNT) ||
+		     IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNTDMA) ||
+		     IS_FLD_SET(SWPORTxSTS_MODE, portsts, NTDMA))) {
+			/* Save the port and partition numbers */
+			ndev->peers[ndev->peer_cnt].port = port;
+			ndev->peers[ndev->peer_cnt].part = part;
+			/* Fill in the port/partition -> index tables */
+			ndev->port_idx_map[port] = ndev->peer_cnt;
+			ndev->part_idx_map[part] = ndev->peer_cnt;
+			ndev->peer_cnt++;
+		}
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "Local port: %hhu, num of peers: %hhu\n",
+		ndev->port, ndev->peer_cnt);
+
+	/* It's useless to have this driver loaded if there are no peers */
+	if (ndev->peer_cnt == 0) {
+		dev_warn(&ndev->ntb.pdev->dev, "No active peer found\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * idt_ntb_port_number() - get the local port number
+ * @ntb:	NTB device context.
+ *
+ * Return: the local port number
+ */
+static int idt_ntb_port_number(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return ndev->port;
+}
+
+/*
+ * idt_ntb_peer_port_count() - get the number of peer ports
+ * @ntb:	NTB device context.
+ *
+ * Return the count of detected peer NT-functions.
+ *
+ * Return: number of peer ports
+ */
+static int idt_ntb_peer_port_count(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return ndev->peer_cnt;
+}
+
+/*
+ * idt_ntb_peer_port_number() - get peer port by given index
+ * @ntb:	NTB device context.
+ * @pidx:	Peer port index.
+ *
+ * Return: peer port or negative error
+ */
+static int idt_ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	/* Return the detected NT-function port number */
+	return ndev->peers[pidx].port;
+}
+
+/*
+ * idt_ntb_peer_port_idx() - get peer port index by given port number
+ * @ntb:	NTB device context.
+ * @port:	Peer port number.
+ *
+ * Internal port -> index table is pre-initialized with -EINVAL values,
+ * so we just need to return its value.
+ *
+ * Return: peer NT-function port index or negative error
+ */
+static int idt_ntb_peer_port_idx(struct ntb_dev *ntb, int port)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (port < 0 || IDT_MAX_NR_PORTS <= port)
+		return -EINVAL;
+
+	return ndev->port_idx_map[port];
+}
+
+/*===========================================================================
+ * 3. Link status operations
+ * There is no ready-to-use method to have the peer ports notified when the
+ * NTB link is brought up or down, so the global signal is used instead.
+ * When any port changes its local NTB link state, it sends the global
+ * signal and clears the corresponding global state bit. Then all the ports
+ * receive a notification of that, which makes the client drivers aware of
+ * a possible NTB link change.
+ * Additionally each of the active NT-functions is subscribed to PCIe-link
+ * state changes of the peer ports.
+ *===========================================================================
+ */
+
+static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev);
+
+/*
+ * idt_init_link() - Initialize NTB link state notification subsystem
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Function performs the basic initialization of some global registers
+ * needed to enable IRQ-based notifications of PCIe Link Up/Down and
+ * Global Signal events.
+ * NOTE: Since it's not possible to determine when all the NTB peer drivers
+ * are unloaded, and since those registers may be accessed concurrently, we
+ * must preinitialize them with the same value and leave them uncleared on
+ * local driver unload.
+ */
+static void idt_init_link(struct idt_ntb_dev *ndev)
+{
+	u32 part_mask, port_mask, se_mask;
+	unsigned char pidx;
+
+	/* Initialize the spin lock of the Mapping Table access registers */
+	spin_lock_init(&ndev->mtbl_lock);
+
+	/* Walk over all detected peers collecting port and partition masks */
+	port_mask = ~BIT(ndev->port);
+	part_mask = ~BIT(ndev->part);
+	for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
+		port_mask &= ~BIT(ndev->peers[pidx].port);
+		part_mask &= ~BIT(ndev->peers[pidx].part);
+	}
+
+	/* Clean the Link Up/Down and Global Signal status registers */
+	idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
+	idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
+	idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
+
+	/* Unmask NT-activated partitions to receive Global Switch events */
+	idt_sw_write(ndev, IDT_SW_SEPMSK, part_mask);
+
+	/* Enable PCIe Link Up events of NT-activated ports */
+	idt_sw_write(ndev, IDT_SW_SELINKUPMSK, port_mask);
+
+	/* Enable PCIe Link Down events of NT-activated ports */
+	idt_sw_write(ndev, IDT_SW_SELINKDNMSK, port_mask);
+
+	/* Unmask NT-activated partitions to receive Global Signal events */
+	idt_sw_write(ndev, IDT_SW_SEGSIGMSK, part_mask);
+
+	/* Unmask Link Up/Down and Global Switch Events */
+	se_mask = ~(IDT_SEMSK_LINKUP | IDT_SEMSK_LINKDN | IDT_SEMSK_GSIGNAL);
+	idt_sw_write(ndev, IDT_SW_SEMSK, se_mask);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events initialized");
+}
+
+/*
+ * idt_deinit_link() - deinitialize link subsystem
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Just disable the local NTB link.
+ */ +static void idt_deinit_link(struct idt_ntb_dev *ndev) +{ + /* Disable the link */ + idt_ntb_local_link_disable(ndev); + + dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events deinitialized"); +} + +/* + * idt_se_isr() - switch events ISR + * @ndev: IDT NTB hardware driver descriptor + * @ntint_sts: NT-function interrupt status + * + * This driver doesn't support IDT PCIe-switch dynamic reconfigurations, + * Failover capability, etc, so switch events are utilized to notify of + * PCIe and NTB link events. + * The method is called from PCIe ISR bottom-half routine. + */ +static void idt_se_isr(struct idt_ntb_dev *ndev, u32 ntint_sts) +{ + u32 sests; + + /* Read Switch Events status */ + sests = idt_sw_read(ndev, IDT_SW_SESTS); + + /* Clean the Link Up/Down and Global Signal status registers */ + idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1); + idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1); + idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1); + + /* Clean the corresponding interrupt bit */ + idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_SEVENT); + + dev_dbg(&ndev->ntb.pdev->dev, "SE IRQ detected %#08x (SESTS %#08x)", + ntint_sts, sests); + + /* Notify the client driver of possible link state change */ + ntb_link_event(&ndev->ntb); +} + +/* + * idt_ntb_local_link_enable() - enable the local NTB link. + * @ndev: IDT NTB hardware driver descriptor + * + * In order to enable the NTB link we need: + * - enable Completion TLPs translation + * - initialize mapping table to enable the Request ID translation + * - notify peers of NTB link state change + */ +static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev) +{ + u32 reqid, mtbldata = 0; + unsigned long irqflags; + + /* Enable the ID protection and Completion TLPs translation */ + idt_nt_write(ndev, IDT_NT_NTCTL, IDT_NTCTL_CPEN); + + /* Retrieve the current Requester ID (Bus:Device:Function) */ + reqid = idt_nt_read(ndev, IDT_NT_REQIDCAP); + + /* + * Set the corresponding NT Mapping table entry of port partition index + * with the data to perform the Request ID translation + */ + mtbldata = SET_FIELD(NTMTBLDATA_REQID, 0, reqid) | + SET_FIELD(NTMTBLDATA_PART, 0, ndev->part) | + IDT_NTMTBLDATA_VALID; + spin_lock_irqsave(&ndev->mtbl_lock, irqflags); + idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part); + idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata); + spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); + + /* Notify the peers by setting and clearing the global signal bit */ + idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET); + idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part); +} + +/* + * idt_ntb_local_link_disable() - disable the local NTB link. 
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * In order to disable the NTB link we need:
+ * - disable Completion TLPs translation
+ * - clear the corresponding mapping table entry
+ * - notify the peers of the NTB link state change
+ */
+static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
+{
+	unsigned long irqflags;
+
+	/* Disable Completion TLPs translation */
+	idt_nt_write(ndev, IDT_NT_NTCTL, 0);
+
+	/* Clear the corresponding NT Mapping table entry */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+	idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	/* Notify the peers by setting and clearing the global signal bit */
+	idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
+	idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
+}
+
+/*
+ * idt_ntb_local_link_is_up() - test whether the local NTB link is up
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Local link is up under the following conditions:
+ * - Bus mastering is enabled
+ * - NTCTL has Completion TLPs translation enabled
+ * - Mapping table permits Request TLPs translation
+ * NOTE: We don't need to check PCIe link state since it's obviously
+ * up while we are able to communicate with the IDT PCIe-switch
+ *
+ * Return: true if link is up, otherwise false
+ */
+static bool idt_ntb_local_link_is_up(struct idt_ntb_dev *ndev)
+{
+	unsigned long irqflags;
+	u32 data;
+
+	/* Read the local Bus Master Enable status */
+	data = idt_nt_read(ndev, IDT_NT_PCICMDSTS);
+	if (!(data & IDT_PCICMDSTS_BME))
+		return false;
+
+	/* Read the local Completion TLPs translation enable status */
+	data = idt_nt_read(ndev, IDT_NT_NTCTL);
+	if (!(data & IDT_NTCTL_CPEN))
+		return false;
+
+	/* Read Mapping table entry corresponding to the local partition */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
+	data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	return !!(data & IDT_NTMTBLDATA_VALID);
+}
+
+/*
+ * idt_ntb_peer_link_is_up() - test whether peer NTB link is up
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @pidx:	Peer port index
+ *
+ * Peer link is up under the following conditions:
+ * - PCIe link is up
+ * - Bus mastering is enabled
+ * - NTCTL has Completion TLPs translation enabled
+ * - Mapping table permits Request TLPs translation
+ *
+ * Return: true if link is up, otherwise false
+ */
+static bool idt_ntb_peer_link_is_up(struct idt_ntb_dev *ndev, int pidx)
+{
+	unsigned long irqflags;
+	unsigned char port;
+	u32 data;
+
+	/* Retrieve the device port number */
+	port = ndev->peers[pidx].port;
+
+	/* Check whether PCIe link is up */
+	data = idt_sw_read(ndev, portdata_tbl[port].sts);
+	if (!(data & IDT_SWPORTxSTS_LINKUP))
+		return false;
+
+	/* Check whether bus mastering is enabled on the peer port */
+	data = idt_sw_read(ndev, portdata_tbl[port].pcicmdsts);
+	if (!(data & IDT_PCICMDSTS_BME))
+		return false;
+
+	/* Check if Completion TLPs translation is enabled on the peer port */
+	data = idt_sw_read(ndev, portdata_tbl[port].ntctl);
+	if (!(data & IDT_NTCTL_CPEN))
+		return false;
+
+	/* Read Mapping table entry corresponding to the peer partition */
+	spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+	idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->peers[pidx].part);
+	data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+	spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+	return !!(data & IDT_NTMTBLDATA_VALID);
+}
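+
+/*
+ * Usage sketch (illustrative only, not a part of this driver): a client
+ * driver normally consumes the checks above over the generic NTB API, e.g.
+ * from its link event callback (client_link_event() and the context layout
+ * are hypothetical here):
+ *
+ *	static void client_link_event(void *ctx)
+ *	{
+ *		struct ntb_dev *ntb = ctx;
+ *
+ *		if (ntb_link_is_up(ntb, NULL, NULL))
+ *			dev_dbg(&ntb->dev, "Some peer NTB link is up\n");
+ *	}
+ */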
+ +/* + * idt_ntb_link_is_up() - get the current ntb link state (NTB API callback) + * @ntb: NTB device context. + * @speed: OUT - The link speed expressed as PCIe generation number. + * @width: OUT - The link width expressed as the number of PCIe lanes. + * + * Get the bitfield of NTB link states for all peer ports + * + * Return: bitfield of indexed ports link state: bit is set/cleared if the + * link is up/down respectively. + */ +static u64 idt_ntb_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, enum ntb_width *width) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + unsigned char pidx; + u64 status; + u32 data; + + /* Retrieve the local link speed and width */ + if (speed != NULL || width != NULL) { + data = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS); + if (speed != NULL) + *speed = GET_FIELD(PCIELCTLSTS_CLS, data); + if (width != NULL) + *width = GET_FIELD(PCIELCTLSTS_NLW, data); + } + + /* If local NTB link isn't up then all the links are considered down */ + if (!idt_ntb_local_link_is_up(ndev)) + return 0; + + /* Collect all the peer ports link states into the bitfield */ + status = 0; + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + if (idt_ntb_peer_link_is_up(ndev, pidx)) + status |= ((u64)1 << pidx); + } + + return status; +} + +/* + * idt_ntb_link_enable() - enable local port ntb link (NTB API callback) + * @ntb: NTB device context. + * @max_speed: The maximum link speed expressed as PCIe generation number. + * @max_width: The maximum link width expressed as the number of PCIe lanes. + * + * Enable just local NTB link. PCIe link parameters are ignored. + * + * Return: always zero. + */ +static int idt_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed speed, + enum ntb_width width) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + /* Just enable the local NTB link */ + idt_ntb_local_link_enable(ndev); + + dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link enabled"); + + return 0; +} + +/* + * idt_ntb_link_disable() - disable local port ntb link (NTB API callback) + * @ntb: NTB device context. + * + * Disable just local NTB link. + * + * Return: always zero. + */ +static int idt_ntb_link_disable(struct ntb_dev *ntb) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + /* Just disable the local NTB link */ + idt_ntb_local_link_disable(ndev); + + dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link disabled"); + + return 0; +} + +/*============================================================================= + * 4. Memory Window operations + * + * IDT PCIe-switches have two types of memory windows: MWs with direct + * address translation and MWs with LUT based translation. The first type of + * MWs is simple map of corresponding BAR address space to a memory space + * of specified target port. So it implemets just ont-to-one mapping. Lookup + * table in its turn can map one BAR address space to up to 24 different + * memory spaces of different ports. + * NT-functions BARs can be turned on to implement either direct or lookup + * table based address translations, so: + * BAR0 - NT configuration registers space/direct address translation + * BAR1 - direct address translation/upper address of BAR0x64 + * BAR2 - direct address translation/Lookup table with either 12 or 24 entries + * BAR3 - direct address translation/upper address of BAR2x64 + * BAR4 - direct address translation/Lookup table with either 12 or 24 entries + * BAR5 - direct address translation/upper address of BAR4x64 + * Additionally BAR2 and BAR4 can't have 24-entries LUT enabled at the same + * time. 
Since the BARs setup can be rather complicated this driver implements + * a scanning algorithm to have all the possible memory windows configuration + * covered. + * + * NOTE 1 BAR setup must be done before Linux kernel enumerated NT-function + * of any port, so this driver would have memory windows configurations fixed. + * In this way all initializations must be performed either by platform BIOS + * or using EEPROM connected to IDT PCIe-switch master SMBus. + * + * NOTE 2 This driver expects BAR0 mapping NT-function configuration space. + * Easy calculation can give us an upper boundary of 29 possible memory windows + * per each NT-function if all the BARs are of 32bit type. + *============================================================================= + */ + +/* + * idt_get_mw_count() - get memory window count + * @mw_type: Memory window type + * + * Return: number of memory windows with respect to the BAR type + */ +static inline unsigned char idt_get_mw_count(enum idt_mw_type mw_type) +{ + switch (mw_type) { + case IDT_MW_DIR: + return 1; + case IDT_MW_LUT12: + return 12; + case IDT_MW_LUT24: + return 24; + default: + break; + } + + return 0; +} + +/* + * idt_get_mw_name() - get memory window name + * @mw_type: Memory window type + * + * Return: pointer to a string with name + */ +static inline char *idt_get_mw_name(enum idt_mw_type mw_type) +{ + switch (mw_type) { + case IDT_MW_DIR: + return "DIR "; + case IDT_MW_LUT12: + return "LUT12"; + case IDT_MW_LUT24: + return "LUT24"; + default: + break; + } + + return "unknown"; +} + +/* + * idt_scan_mws() - scan memory windows of the port + * @ndev: IDT NTB hardware driver descriptor + * @port: Port to get number of memory windows for + * @mw_cnt: Out - number of memory windows + * + * It walks over BAR setup registers of the specified port and determines + * the memory windows parameters if any activated. 
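+ *
+ * For example (a hypothetical BAR setup): a BAR configured with the LUT12
+ * address translation and a 2^20 bytes aperture would contribute twelve
+ * memory windows of 64KB each (aperture / 16), which matches the
+ * calculation performed below.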
+ * + * Return: array of memory windows + */ +static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, + unsigned char *mw_cnt) +{ + struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws; + const struct idt_ntb_bar *bars; + enum idt_mw_type mw_type; + unsigned char widx, bidx, en_cnt; + bool bar_64bit = false; + int aprt_size; + u32 data; + + /* Retrieve the array of the BARs registers */ + bars = portdata_tbl[port].bars; + + /* Scan all the BARs belonging to the port */ + *mw_cnt = 0; + for (bidx = 0; bidx < IDT_BAR_CNT; bidx += 1 + bar_64bit) { + /* Read BARSETUP register value */ + data = idt_sw_read(ndev, bars[bidx].setup); + + /* Skip disabled BARs */ + if (!(data & IDT_BARSETUP_EN)) { + bar_64bit = false; + continue; + } + + /* Skip next BARSETUP if current one has 64bit addressing */ + bar_64bit = IS_FLD_SET(BARSETUP_TYPE, data, 64); + + /* Skip configuration space mapping BARs */ + if (data & IDT_BARSETUP_MODE_CFG) + continue; + + /* Retrieve MW type/entries count and aperture size */ + mw_type = GET_FIELD(BARSETUP_ATRAN, data); + en_cnt = idt_get_mw_count(mw_type); + aprt_size = (u64)1 << GET_FIELD(BARSETUP_SIZE, data); + + /* Save configurations of all available memory windows */ + for (widx = 0; widx < en_cnt; widx++, (*mw_cnt)++) { + /* + * IDT can expose a limited number of MWs, so it's bug + * to have more than the driver expects + */ + if (*mw_cnt >= IDT_MAX_NR_MWS) + return ERR_PTR(-EINVAL); + + /* Save basic MW info */ + mws[*mw_cnt].type = mw_type; + mws[*mw_cnt].bar = bidx; + mws[*mw_cnt].idx = widx; + /* It's always DWORD aligned */ + mws[*mw_cnt].addr_align = IDT_TRANS_ALIGN; + /* DIR and LUT approachs differently configure MWs */ + if (mw_type == IDT_MW_DIR) + mws[*mw_cnt].size_max = aprt_size; + else if (mw_type == IDT_MW_LUT12) + mws[*mw_cnt].size_max = aprt_size / 16; + else + mws[*mw_cnt].size_max = aprt_size / 32; + mws[*mw_cnt].size_align = (mw_type == IDT_MW_DIR) ? 
+ IDT_DIR_SIZE_ALIGN : mws[*mw_cnt].size_max; + } + } + + /* Allocate memory for memory window descriptors */ + ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws), + GFP_KERNEL); + if (!ret_mws) + return ERR_PTR(-ENOMEM); + + /* Copy the info of detected memory windows */ + memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws)); + + return ret_mws; +} + +/* + * idt_init_mws() - initialize memory windows subsystem + * @ndev: IDT NTB hardware driver descriptor + * + * Scan BAR setup registers of local and peer ports to determine the + * outbound and inbound memory windows parameters + * + * Return: zero on success, otherwise a negative error number + */ +static int idt_init_mws(struct idt_ntb_dev *ndev) +{ + struct idt_ntb_peer *peer; + unsigned char pidx; + + /* Scan memory windows of the local port */ + ndev->mws = idt_scan_mws(ndev, ndev->port, &ndev->mw_cnt); + if (IS_ERR(ndev->mws)) { + dev_err(&ndev->ntb.pdev->dev, + "Failed to scan mws of local port %hhu", ndev->port); + return PTR_ERR(ndev->mws); + } + + /* Scan memory windows of the peer ports */ + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + peer = &ndev->peers[pidx]; + peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt); + if (IS_ERR(peer->mws)) { + dev_err(&ndev->ntb.pdev->dev, + "Failed to scan mws of port %hhu", peer->port); + return PTR_ERR(peer->mws); + } + } + + /* Initialize spin locker of the LUT registers */ + spin_lock_init(&ndev->lut_lock); + + dev_dbg(&ndev->ntb.pdev->dev, "Outbound and inbound MWs initialized"); + + return 0; +} + +/* + * idt_ntb_mw_count() - number of inbound memory windows (NTB API callback) + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * + * The value is returned for the specified peer, so generally speaking it can + * be different for different port depending on the IDT PCIe-switch + * initialization. + * + * Return: the number of memory windows. + */ +static int idt_ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + return ndev->peers[pidx].mw_cnt; +} + +/* + * idt_ntb_mw_get_align() - inbound memory window parameters (NTB API callback) + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. + * @addr_align: OUT - the base alignment for translating the memory window + * @size_align: OUT - the size alignment for translating the memory window + * @size_max: OUT - the maximum size of the memory window + * + * The peer memory window parameters have already been determined, so just + * return the corresponding values, which mustn't change within session. + * + * Return: Zero on success, otherwise a negative error number. + */ +static int idt_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + struct idt_ntb_peer *peer; + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + peer = &ndev->peers[pidx]; + + if (widx < 0 || peer->mw_cnt <= widx) + return -EINVAL; + + if (addr_align != NULL) + *addr_align = peer->mws[widx].addr_align; + + if (size_align != NULL) + *size_align = peer->mws[widx].size_align; + + if (size_max != NULL) + *size_max = peer->mws[widx].size_max; + + return 0; +} + +/* + * idt_ntb_peer_mw_count() - number of outbound memory windows + * (NTB API callback) + * @ntb: NTB device context. 
+ *
+ * Outbound memory windows parameters have been determined based on the
+ * BAR setup registers values, which are mostly constant within one session.
+ *
+ * Return: the number of memory windows.
+ */
+static int idt_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return ndev->mw_cnt;
+}
+
+/*
+ * idt_ntb_peer_mw_get_addr() - get map address of an outbound memory window
+ *				(NTB API callback)
+ * @ntb:	NTB device context.
+ * @widx:	Memory window index (within ntb_peer_mw_count() return value).
+ * @base:	OUT - the base address of mapping region.
+ * @size:	OUT - the size of mapping region.
+ *
+ * Return just the parameters of the BAR resource mapping. The size reflects
+ * just the size of the resource.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static int idt_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
+				    phys_addr_t *base, resource_size_t *size)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (widx < 0 || ndev->mw_cnt <= widx)
+		return -EINVAL;
+
+	/* Mapping address is just properly shifted BAR resource start */
+	if (base != NULL)
+		*base = pci_resource_start(ntb->pdev, ndev->mws[widx].bar) +
+			ndev->mws[widx].idx * ndev->mws[widx].size_max;
+
+	/* Mapping size has already been calculated at MWs scanning */
+	if (size != NULL)
+		*size = ndev->mws[widx].size_max;
+
+	return 0;
+}
+
+/*
+ * idt_ntb_peer_mw_set_trans() - set a translation address of a memory window
+ *				 (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of the peer device the translation address is
+ *		received from.
+ * @widx:	Memory window index.
+ * @addr:	The dma address of the shared memory to access.
+ * @size:	The size of the shared memory to access.
+ *
+ * The direct address translation and the LUT-based translation are
+ * initialized a bit differently, although the parameter restrictions are
+ * checked by the same code.
+ *
+ * Return: Zero on success, otherwise an error number.
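+ *
+ * A minimal client-side usage sketch over the generic NTB API (illustrative
+ * only; dma_addr is a hypothetical DMA address of a locally allocated
+ * buffer):
+ *
+ *	ret = ntb_peer_mw_set_trans(ntb, pidx, widx, dma_addr, size);
+ *	if (ret)
+ *		dev_err(&ntb->dev, "Failed to set outbound MW translation\n");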
+ */ +static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, + u64 addr, resource_size_t size) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + struct idt_mw_cfg *mw_cfg; + u32 data = 0, lutoff = 0; + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + if (widx < 0 || ndev->mw_cnt <= widx) + return -EINVAL; + + /* + * Retrieve the memory window config to make sure the passed arguments + * fit it restrictions + */ + mw_cfg = &ndev->mws[widx]; + if (!IS_ALIGNED(addr, mw_cfg->addr_align)) + return -EINVAL; + if (!IS_ALIGNED(size, mw_cfg->size_align) || size > mw_cfg->size_max) + return -EINVAL; + + /* DIR and LUT based translations are initialized differently */ + if (mw_cfg->type == IDT_MW_DIR) { + const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar]; + u64 limit; + /* Set destination partition of translation */ + data = idt_nt_read(ndev, bar->setup); + data = SET_FIELD(BARSETUP_TPART, data, ndev->peers[pidx].part); + idt_nt_write(ndev, bar->setup, data); + /* Set translation base address */ + idt_nt_write(ndev, bar->ltbase, (u32)addr); + idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32)); + /* Set the custom BAR aperture limit */ + limit = pci_bus_address(ntb->pdev, mw_cfg->bar) + size; + idt_nt_write(ndev, bar->limit, (u32)limit); + if (IS_FLD_SET(BARSETUP_TYPE, data, 64)) + idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32)); + } else { + unsigned long irqflags; + /* Initialize corresponding LUT entry */ + lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) | + SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar); + data = SET_FIELD(LUTUDATA_PART, 0, ndev->peers[pidx].part) | + IDT_LUTUDATA_VALID; + spin_lock_irqsave(&ndev->lut_lock, irqflags); + idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff); + idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr); + idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32)); + idt_nt_write(ndev, IDT_NT_LUTUDATA, data); + spin_unlock_irqrestore(&ndev->lut_lock, irqflags); + /* Limit address isn't specified since size is fixed for LUT */ + } + + return 0; +} + +/* + * idt_ntb_peer_mw_clear_trans() - clear the outbound MW translation address + * (NTB API callback) + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. + * + * It effectively disables the translation over the specified outbound MW. + * + * Return: Zero on success, otherwise an error number. 
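+ *
+ * It is the counterpart of the setup above: a client driver would typically
+ * call ntb_peer_mw_clear_trans(ntb, pidx, widx) while tearing the shared
+ * memory down.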
+ */ +static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx, + int widx) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + struct idt_mw_cfg *mw_cfg; + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + if (widx < 0 || ndev->mw_cnt <= widx) + return -EINVAL; + + mw_cfg = &ndev->mws[widx]; + + /* DIR and LUT based translations are initialized differently */ + if (mw_cfg->type == IDT_MW_DIR) { + const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar]; + u32 data; + /* Read BARSETUP to check BAR type */ + data = idt_nt_read(ndev, bar->setup); + /* Disable translation by specifying zero BAR limit */ + idt_nt_write(ndev, bar->limit, 0); + if (IS_FLD_SET(BARSETUP_TYPE, data, 64)) + idt_nt_write(ndev, (bar + 1)->limit, 0); + } else { + unsigned long irqflags; + u32 lutoff; + /* Clear the corresponding LUT entry up */ + lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) | + SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar); + spin_lock_irqsave(&ndev->lut_lock, irqflags); + idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff); + idt_nt_write(ndev, IDT_NT_LUTLDATA, 0); + idt_nt_write(ndev, IDT_NT_LUTMDATA, 0); + idt_nt_write(ndev, IDT_NT_LUTUDATA, 0); + spin_unlock_irqrestore(&ndev->lut_lock, irqflags); + } + + return 0; +} + +/*============================================================================= + * 5. Doorbell operations + * + * Doorbell functionality of IDT PCIe-switches is pretty unusual. First of + * all there is global doorbell register which state can be changed by any + * NT-function of the IDT device in accordance with global permissions. These + * permissions configs are not supported by NTB API, so it must be done by + * either BIOS or EEPROM settings. In the same way the state of the global + * doorbell is reflected to the NT-functions local inbound doorbell registers. + * It can lead to situations when client driver sets some peer doorbell bits + * and get them bounced back to local inbound doorbell if permissions are + * granted. + * Secondly there is just one IRQ vector for Doorbell, Message, Temperature + * and Switch events, so if client driver left any of Doorbell bits set and + * some other event occurred, the driver will be notified of Doorbell event + * again. + *============================================================================= + */ + +/* + * idt_db_isr() - doorbell event ISR + * @ndev: IDT NTB hardware driver descriptor + * @ntint_sts: NT-function interrupt status + * + * Doorbell event happans when DBELL bit of NTINTSTS switches from 0 to 1. + * It happens only when unmasked doorbell bits are set to ones on completely + * zeroed doorbell register. + * The method is called from PCIe ISR bottom-half routine. + */ +static void idt_db_isr(struct idt_ntb_dev *ndev, u32 ntint_sts) +{ + /* + * Doorbell IRQ status will be cleaned only when client + * driver unsets all the doorbell bits. + */ + dev_dbg(&ndev->ntb.pdev->dev, "DB IRQ detected %#08x", ntint_sts); + + /* Notify the client driver of possible doorbell state change */ + ntb_db_event(&ndev->ntb, 0); +} + +/* + * idt_ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb + * (NTB API callback) + * @ntb: NTB device context. + * + * IDT PCIe-switches expose just one Doorbell register of DWORD size. + * + * Return: A mask of doorbell bits supported by the ntb. + */ +static u64 idt_ntb_db_valid_mask(struct ntb_dev *ntb) +{ + return IDT_DBELL_MASK; +} + +/* + * idt_ntb_db_read() - read the local doorbell register (NTB API callback) + * @ntb: NTB device context. 
+ * + * There is just on inbound doorbell register of each NT-function, so + * this method return it value. + * + * Return: The bits currently set in the local doorbell register. + */ +static u64 idt_ntb_db_read(struct ntb_dev *ntb) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + return idt_nt_read(ndev, IDT_NT_INDBELLSTS); +} + +/* + * idt_ntb_db_clear() - clear bits in the local doorbell register + * (NTB API callback) + * @ntb: NTB device context. + * @db_bits: Doorbell bits to clear. + * + * Clear bits of inbound doorbell register by writing ones to it. + * + * NOTE! Invalid bits are always considered cleared so it's not an error + * to clear them over. + * + * Return: always zero as success. + */ +static int idt_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + idt_nt_write(ndev, IDT_NT_INDBELLSTS, (u32)db_bits); + + return 0; +} + +/* + * idt_ntb_db_read_mask() - read the local doorbell mask (NTB API callback) + * @ntb: NTB device context. + * + * Each inbound doorbell bit can be masked from generating IRQ by setting + * the corresponding bit in inbound doorbell mask. So this method returns + * the value of the register. + * + * Return: The bits currently set in the local doorbell mask register. + */ +static u64 idt_ntb_db_read_mask(struct ntb_dev *ntb) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + return idt_nt_read(ndev, IDT_NT_INDBELLMSK); +} + +/* + * idt_ntb_db_set_mask() - set bits in the local doorbell mask + * (NTB API callback) + * @ntb: NTB device context. + * @db_bits: Doorbell mask bits to set. + * + * The inbound doorbell register mask value must be read, then OR'ed with + * passed field and only then set back. + * + * Return: zero on success, negative error if invalid argument passed. + */ +static int idt_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + return idt_reg_set_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock, + IDT_DBELL_MASK, db_bits); +} + +/* + * idt_ntb_db_clear_mask() - clear bits in the local doorbell mask + * (NTB API callback) + * @ntb: NTB device context. + * @db_bits: Doorbell bits to clear. + * + * The method just clears the set bits up in accordance with the passed + * bitfield. IDT PCIe-switch shall generate an interrupt if there hasn't + * been any unmasked bit set before current unmasking. Otherwise IRQ won't + * be generated since there is only one IRQ vector for all doorbells. + * + * Return: always zero as success + */ +static int idt_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + idt_reg_clear_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock, + db_bits); + + return 0; +} + +/* + * idt_ntb_peer_db_set() - set bits in the peer doorbell register + * (NTB API callback) + * @ntb: NTB device context. + * @db_bits: Doorbell bits to set. + * + * IDT PCIe-switches exposes local outbound doorbell register to change peer + * inbound doorbell register state. + * + * Return: zero on success, negative error if invalid argument passed. + */ +static int idt_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + if (db_bits & ~(u64)IDT_DBELL_MASK) + return -EINVAL; + + idt_nt_write(ndev, IDT_NT_OUTDBELLSET, (u32)db_bits); + return 0; +} + +/*============================================================================= + * 6. 
+ * 6. Messaging operations
+ *
+ * Each NT-function of an IDT PCIe-switch has four inbound and four outbound
+ * message registers. Each outbound message register can be connected to one
+ * or even more than one peer inbound message register by setting global
+ * configurations. Since the NTB API permits one-on-one message register
+ * mapping only, the driver acts in accordance with that restriction.
+ *=============================================================================
+ */
+
+/*
+ * idt_init_msg() - initialize the messaging interface
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Just initialize the message register routing table lockers.
+ */
+static void idt_init_msg(struct idt_ntb_dev *ndev)
+{
+	unsigned char midx;
+
+	/* Init the messages routing table lockers */
+	for (midx = 0; midx < IDT_MSG_CNT; midx++)
+		spin_lock_init(&ndev->msg_locks[midx]);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB Messaging initialized");
+}
+
+/*
+ * idt_msg_isr() - message event ISR
+ * @ndev: IDT NTB hardware driver descriptor
+ * @ntint_sts: NT-function interrupt status
+ *
+ * A message event happens when the MSG bit of NTINTSTS switches from 0 to 1.
+ * It happens only when unmasked message status bits are set on a completely
+ * zeroed message status register.
+ * The method is called from the PCIe ISR bottom-half routine.
+ */
+static void idt_msg_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	/*
+	 * The message IRQ status will be cleared only when the client
+	 * driver unsets all the message status bits.
+	 */
+	dev_dbg(&ndev->ntb.pdev->dev, "Message IRQ detected %#08x", ntint_sts);
+
+	/* Notify the client driver of a possible message status change */
+	ntb_msg_event(&ndev->ntb);
+}
+
+/*
+ * idt_ntb_msg_count() - get the number of message registers (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * IDT PCIe-switches support four message registers.
+ *
+ * Return: the number of message registers.
+ */
+static int idt_ntb_msg_count(struct ntb_dev *ntb)
+{
+	return IDT_MSG_CNT;
+}
+
+/*
+ * idt_ntb_msg_inbits() - get a bitfield of inbound message registers status
+ *			  (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * The NT message status register is shared between the inbound and outbound
+ * message registers' status bits.
+ *
+ * Return: bitfield of inbound message registers.
+ */
+static u64 idt_ntb_msg_inbits(struct ntb_dev *ntb)
+{
+	return (u64)IDT_INMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_outbits() - get a bitfield of outbound message registers status
+ *			   (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * The NT message status register is shared between the inbound and outbound
+ * message registers' status bits.
+ *
+ * Return: bitfield of outbound message registers.
+ */
+static u64 idt_ntb_msg_outbits(struct ntb_dev *ntb)
+{
+	return (u64)IDT_OUTMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_read_sts() - read the message registers status (NTB API callback)
+ * @ntb: NTB device context.
+ *
+ * IDT PCIe-switches expose message status registers to notify drivers of
+ * incoming data and of failures in case the peer message register isn't
+ * freed.
+ *
+ * Return: status bits of message registers
+ */
+static u64 idt_ntb_msg_read_sts(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_nt_read(ndev, IDT_NT_MSGSTS);
+}
+
+/*
+ * idt_ntb_msg_clear_sts() - clear status bits of message registers
+ *			     (NTB API callback)
+ * @ntb: NTB device context.
+ * @sts_bits: Status bits to clear.
+ *
+ * Clear bits in the status register by writing ones.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	idt_nt_write(ndev, IDT_NT_MSGSTS, sts_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_set_mask() - set mask of message register status bits
+ *			    (NTB API callback)
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits.
+ *
+ * Mask the message status bits from raising an IRQ.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
+ */
+static int idt_ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_reg_set_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+				IDT_MSG_MASK, mask_bits);
+}
+
+/*
+ * idt_ntb_msg_clear_mask() - clear message registers mask
+ *			      (NTB API callback)
+ * @ntb: NTB device context.
+ * @mask_bits: Mask bits.
+ *
+ * Clear the mask of message status bit IRQs.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+			   mask_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_read() - read a message register with the specified index
+ *			(NTB API callback)
+ * @ntb: NTB device context.
+ * @pidx: OUT - Port index of the peer device the message was retrieved from
+ * @midx: Message register index
+ *
+ * Read data from the specified message register and its source register.
+ *
+ * Return: inbound message register value.
+ */
+static u32 idt_ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (midx < 0 || IDT_MSG_CNT <= midx)
+		return ~(u32)0;
+
+	/* Retrieve the source port index of the message */
+	if (pidx != NULL) {
+		u32 srcpart;
+
+		srcpart = idt_nt_read(ndev, ntdata_tbl.msgs[midx].src);
+		*pidx = ndev->part_idx_map[srcpart];
+
+		/* Sanity check the partition index (for the initial case) */
+		if (*pidx == -EINVAL)
+			*pidx = 0;
+	}
+
+	/* Retrieve the data of the corresponding message register */
+	return idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
+}
+
+/*
+ * idt_ntb_peer_msg_write() - write data to the specified message register
+ *			      (NTB API callback)
+ * @ntb: NTB device context.
+ * @pidx: Port index of the peer device the message is being sent to
+ * @midx: Message register index
+ * @msg: Data to send
+ *
+ * Just try to send data to a peer. The message status register should be
+ * checked by the client driver.
+ *
+ * Return: zero on success, negative error if invalid argument passed.
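+ *
+ * A hypothetical client-side usage sketch (the check below is an assumed
+ * pattern for illustration, not part of this driver): send a message, then
+ * look at the outbound status bits to see whether the peer failed to free
+ * its register in time:
+ *
+ *	ret = ntb_peer_msg_write(ntb, pidx, 0, data);
+ *	if (!ret && (ntb_msg_read_sts(ntb) & ntb_msg_outbits(ntb)))
+ *		... delivery failed, retry later ...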
+ */
+static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
+				  u32 msg)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	unsigned long irqflags;
+	u32 swpmsgctl = 0;
+
+	if (midx < 0 || IDT_MSG_CNT <= midx)
+		return -EINVAL;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	/* Collect the routing information */
+	swpmsgctl = SET_FIELD(SWPxMSGCTL_REG, 0, midx) |
+		    SET_FIELD(SWPxMSGCTL_PART, 0, ndev->peers[pidx].part);
+
+	/* Lock the messages routing table of the specified register */
+	spin_lock_irqsave(&ndev->msg_locks[midx], irqflags);
+	/* Set the route and send the data */
+	idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
+	idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
+	/* Unlock the messages routing table */
+	spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
+
+	/* The client driver shall check the status register */
+	return 0;
+}
+
+/*=============================================================================
+ * 7. Temperature sensor operations
+ *
+ * The IDT PCIe-switch has an embedded temperature sensor, which can be used
+ * to check the current chip core temperature. Since the workload environment
+ * can differ between platforms, an offset and ADC/filter settings can be
+ * specified, although only the offset configuration is exposed to the sysfs
+ * hwmon interface at the moment. The rest of the settings can be adjusted,
+ * for instance, by the BIOS/EEPROM firmware.
+ *=============================================================================
+ */
+
+/*
+ * idt_get_deg() - convert a millidegree Celsius value to whole degrees
+ * @mdegC: IN - millidegree Celsius value
+ *
+ * Return: Degrees corresponding to the passed millidegree value
+ */
+static inline s8 idt_get_deg(long mdegC)
+{
+	return mdegC / 1000;
+}
+
+/*
+ * idt_get_deg_frac() - retrieve the 0/0.5 fraction of a millidegree Celsius
+ *			value
+ * @mdegC: IN - millidegree Celsius value
+ *
+ * Return: 0/0.5 degree fraction of the passed millidegree value
+ */
+static inline u8 idt_get_deg_frac(long mdegC)
+{
+	return (mdegC % 1000) >= 500 ? 5 : 0;
+}
+
+/*
+ * idt_temp_get_fmt() - convert a millidegree Celsius value to the 0:7:1 format
+ * @mdegC: IN - millidegree Celsius value
+ *
+ * Return: 0:7:1 format value acceptable by the IDT temperature sensor
+ */
+static inline u8 idt_temp_get_fmt(long mdegC)
+{
+	return (idt_get_deg(mdegC) << 1) | (idt_get_deg_frac(mdegC) ? 1 : 0);
+}
+
+/*
+ * idt_get_temp_sval() - convert a temp sample to signed millidegree Celsius
+ * @data: IN - 8-bit temperature sample shifted to the LSB
+ *
+ * Return: signed millidegree Celsius
+ */
+static inline long idt_get_temp_sval(u32 data)
+{
+	return ((s8)data / 2) * 1000 + (data & 0x1 ? 500 : 0);
+}
+
+/*
+ * idt_get_temp_uval() - convert a temp sample to unsigned millidegree Celsius
+ * @data: IN - 8-bit temperature sample shifted to the LSB
+ *
+ * Return: unsigned millidegree Celsius
+ */
+static inline long idt_get_temp_uval(u32 data)
+{
+	return (data / 2) * 1000 + (data & 0x1 ? 500 : 0);
+}
+
+/*
+ * idt_read_temp() - read a temperature value from the chip sensor
+ * @ndev: IDT NTB hardware driver descriptor
+ * @type: IN - type of the temperature value to read
+ * @val: OUT - integer value of the temperature in millidegree Celsius
+ */
+static void idt_read_temp(struct idt_ntb_dev *ndev,
+			  const enum idt_temp_val type, long *val)
+{
+	u32 data;
+
+	/* Read the temperature field corresponding to the passed type */
+	switch (type) {
+	case IDT_TEMP_CUR:
+		data = GET_FIELD(TMPSTS_TEMP,
+				 idt_sw_read(ndev, IDT_SW_TMPSTS));
+		break;
+	case IDT_TEMP_LOW:
+		data = GET_FIELD(TMPSTS_LTEMP,
+				 idt_sw_read(ndev, IDT_SW_TMPSTS));
+		break;
+	case IDT_TEMP_HIGH:
+		data = GET_FIELD(TMPSTS_HTEMP,
+				 idt_sw_read(ndev, IDT_SW_TMPSTS));
+		break;
+	case IDT_TEMP_OFFSET:
+		/* This is the only field with the signed 0:7:1 format */
+		data = GET_FIELD(TMPADJ_OFFSET,
+				 idt_sw_read(ndev, IDT_SW_TMPADJ));
+		*val = idt_get_temp_sval(data);
+		return;
+	default:
+		data = GET_FIELD(TMPSTS_TEMP,
+				 idt_sw_read(ndev, IDT_SW_TMPSTS));
+		break;
+	}
+
+	/* The rest of the fields accept the unsigned 0:7:1 format */
+	*val = idt_get_temp_uval(data);
+}
+
+/*
+ * idt_write_temp() - write a temperature value to the chip sensor register
+ * @ndev: IDT NTB hardware driver descriptor
+ * @type: IN - type of the temperature value to change
+ * @val: IN - integer value of the temperature in millidegree Celsius
+ */
+static void idt_write_temp(struct idt_ntb_dev *ndev,
+			   const enum idt_temp_val type, const long val)
+{
+	unsigned int reg;
+	u32 data;
+	u8 fmt;
+
+	/* Retrieve the properly formatted temperature value */
+	fmt = idt_temp_get_fmt(val);
+
+	mutex_lock(&ndev->hwmon_mtx);
+	switch (type) {
+	case IDT_TEMP_LOW:
+		reg = IDT_SW_TMPALARM;
+		data = SET_FIELD(TMPALARM_LTEMP, idt_sw_read(ndev, reg), fmt) &
+			~IDT_TMPALARM_IRQ_MASK;
+		break;
+	case IDT_TEMP_HIGH:
+		reg = IDT_SW_TMPALARM;
+		data = SET_FIELD(TMPALARM_HTEMP, idt_sw_read(ndev, reg), fmt) &
+			~IDT_TMPALARM_IRQ_MASK;
+		break;
+	case IDT_TEMP_OFFSET:
+		reg = IDT_SW_TMPADJ;
+		data = SET_FIELD(TMPADJ_OFFSET, idt_sw_read(ndev, reg), fmt);
+		break;
+	default:
+		goto inval_spin_unlock;
+	}
+
+	idt_sw_write(ndev, reg, data);
+
+inval_spin_unlock:
+	mutex_unlock(&ndev->hwmon_mtx);
+}
+
+/*
+ * idt_sysfs_show_temp() - print out the corresponding temperature value
+ * @dev: Pointer to the NTB device structure
+ * @da: Sensor device attribute structure
+ * @buf: Buffer to print the temperature to
+ *
+ * Return: Number of written symbols or a negative error
+ */
+static ssize_t idt_sysfs_show_temp(struct device *dev,
+				   struct device_attribute *da, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
+	enum idt_temp_val type = attr->index;
+	long mdeg;
+
+	idt_read_temp(ndev, type, &mdeg);
+	return sprintf(buf, "%ld\n", mdeg);
+}
+
+/*
+ * idt_sysfs_set_temp() - set the corresponding temperature value
+ * @dev: Pointer to the NTB device structure
+ * @da: Sensor device attribute structure
+ * @buf: Buffer with the temperature value to set
+ * @count: Size of the passed buffer
+ *
+ * Return: Number of consumed symbols or a negative error
+ */
+static ssize_t idt_sysfs_set_temp(struct device *dev,
+				  struct device_attribute *da, const char *buf,
+				  size_t count)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
+	enum idt_temp_val type = attr->index;
+	long mdeg;
+	int ret;
+
+	ret = kstrtol(buf, 10, &mdeg);
+	if (ret)
+		return ret;
+
+	/* Clamp the passed value in accordance with the type */
+	if (type == IDT_TEMP_OFFSET)
+		mdeg = clamp_val(mdeg, IDT_TEMP_MIN_OFFSET,
+				 IDT_TEMP_MAX_OFFSET);
+	else
+		mdeg = clamp_val(mdeg, IDT_TEMP_MIN_MDEG, IDT_TEMP_MAX_MDEG);
+
+	idt_write_temp(ndev, type, mdeg);
+
+	return count;
+}
+
+/*
+ * idt_sysfs_reset_hist() - reset the temperature history
+ * @dev: Pointer to the NTB device structure
+ * @da: Sensor device attribute structure
+ * @buf: Buffer with the passed data (ignored)
+ * @count: Size of the passed buffer
+ *
+ * Return: Number of consumed symbols or a negative error
+ */
+static ssize_t idt_sysfs_reset_hist(struct device *dev,
+				    struct device_attribute *da,
+				    const char *buf, size_t count)
+{
+	struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
+
+	/* Just set the maximal value in the lowest temperature field and the
+	 * minimal value in the highest temperature field
+	 */
+	idt_write_temp(ndev, IDT_TEMP_LOW, IDT_TEMP_MAX_MDEG);
+	idt_write_temp(ndev, IDT_TEMP_HIGH, IDT_TEMP_MIN_MDEG);
+
+	return count;
+}
+
+/*
+ * Hwmon IDT sysfs attributes
+ */
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, idt_sysfs_show_temp, NULL,
+			  IDT_TEMP_CUR);
+static SENSOR_DEVICE_ATTR(temp1_lowest, 0444, idt_sysfs_show_temp, NULL,
+			  IDT_TEMP_LOW);
+static SENSOR_DEVICE_ATTR(temp1_highest, 0444, idt_sysfs_show_temp, NULL,
+			  IDT_TEMP_HIGH);
+static SENSOR_DEVICE_ATTR(temp1_offset, 0644, idt_sysfs_show_temp,
+			  idt_sysfs_set_temp, IDT_TEMP_OFFSET);
+static DEVICE_ATTR(temp1_reset_history, 0200, NULL, idt_sysfs_reset_hist);
+
+/*
+ * Hwmon IDT sysfs attributes group
+ */
+static struct attribute *idt_temp_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_lowest.dev_attr.attr,
+	&sensor_dev_attr_temp1_highest.dev_attr.attr,
+	&sensor_dev_attr_temp1_offset.dev_attr.attr,
+	&dev_attr_temp1_reset_history.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(idt_temp);
+
+/*
+ * idt_init_temp() - initialize the temperature sensor interface
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * This simple sensor initialization method is responsible for switching the
+ * device on and registering the hwmon interface using managed resources.
+ * Note that since the device is shared, we won't disable it on remove, but
+ * leave it working until the system is powered off.
+ */
+static void idt_init_temp(struct idt_ntb_dev *ndev)
+{
+	struct device *hwmon;
+
+	/* Enable the sensor if it hasn't been already */
+	idt_sw_write(ndev, IDT_SW_TMPCTL, 0x0);
+
+	/* Initialize the hwmon interface fields */
+	mutex_init(&ndev->hwmon_mtx);
+
+	hwmon = devm_hwmon_device_register_with_groups(&ndev->ntb.pdev->dev,
+		ndev->swcfg->name, ndev, idt_temp_groups);
+	if (IS_ERR(hwmon)) {
+		dev_err(&ndev->ntb.pdev->dev, "Couldn't create hwmon device");
+		return;
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "Temperature HWmon interface registered");
+}
+
+/*=============================================================================
+ * 8. ISRs related operations
+ *
+ * The IDT PCIe-switch has a strangely designed IRQ system. There is just one
+ * interrupt vector for the doorbell and message registers. So the hardware
+ * driver can't determine the actual source of an IRQ if, for example, a
+ * message event happened while any unmasked doorbell bit is still set. A
+ * similar situation may arise if switch or temperature sensor events pop up.
+ * The difference is that the SEVENT and TMPSENSOR bits of the NT interrupt
+ * status register can be cleared by the IRQ handler, so the next interrupt
+ * request won't falsely handle the corresponding events.
+ * The hardware driver has only a bottom-half handler of the IRQ, since if any
+ * of the events happened, the device won't raise the interrupt again before
+ * the last one is handled by clearing the corresponding NTINTSTS bit.
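+ *
+ * This also means a client's doorbell event callback is expected to clear
+ * the bits it has handled, otherwise the shared vector keeps firing. A
+ * hypothetical client callback sketch (the names and context usage are
+ * assumptions for illustration, not part of this driver):
+ *
+ *	static void client_db_event(void *ctx, int vec)
+ *	{
+ *		struct ntb_dev *ntb = ctx;
+ *		u64 db = ntb_db_read(ntb);
+ *
+ *		... process the set bits ...
+ *		ntb_db_clear(ntb, db);
+ *	}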
+ *=============================================================================
+ */
+
+static irqreturn_t idt_thread_isr(int irq, void *devid);
+
+/*
+ * idt_init_isr() - initialize the PCIe interrupt handler
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_isr(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ntint_mask;
+	int ret;
+
+	/* Allocate just one interrupt vector for the ISR */
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+	if (ret != 1) {
+		dev_err(&pdev->dev, "Failed to allocate IRQ vector");
+		return ret;
+	}
+
+	/* Retrieve the IRQ vector */
+	ret = pci_irq_vector(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ vector");
+		goto err_free_vectors;
+	}
+
+	/* Set the IRQ handler */
+	ret = devm_request_threaded_irq(&pdev->dev, ret, NULL, idt_thread_isr,
+					IRQF_ONESHOT, NTB_IRQNAME, ndev);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to set MSI IRQ handler, %d", ret);
+		goto err_free_vectors;
+	}
+
+	/* Unmask Message/Doorbell/SE interrupts */
+	ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
+	idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+	/* From now on the interrupts are enabled */
+	dev_dbg(&pdev->dev, "NTB interrupts initialized");
+
+	return 0;
+
+err_free_vectors:
+	pci_free_irq_vectors(pdev);
+
+	return ret;
+}
+
+/*
+ * idt_deinit_isr() - deinitialize the PCIe interrupt handler
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Disable the corresponding interrupts and free the allocated IRQ vectors.
+ */
+static void idt_deinit_isr(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ntint_mask;
+
+	/* Mask the interrupts back */
+	ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
+	idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+	/* Manually free the IRQ, otherwise freeing the PCI IRQ vectors
+	 * will fail
+	 */
+	devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 0), ndev);
+
+	/* Free the allocated IRQ vectors */
+	pci_free_irq_vectors(pdev);
+
+	dev_dbg(&pdev->dev, "NTB interrupts deinitialized");
+}
+
+/*
+ * idt_thread_isr() - NT function interrupts handler
+ * @irq: IRQ number
+ * @devid: Custom buffer
+ *
+ * It reads the current NT interrupt status register and handles all the
+ * events it declares.
+ * The method is the bottom-half routine of the default PCIe IRQ handler.
+ */
+static irqreturn_t idt_thread_isr(int irq, void *devid)
+{
+	struct idt_ntb_dev *ndev = devid;
+	bool handled = false;
+	u32 ntint_sts;
+
+	/* Read the NT interrupts status register */
+	ntint_sts = idt_nt_read(ndev, IDT_NT_NTINTSTS);
+
+	/* Handle messaging interrupts */
+	if (ntint_sts & IDT_NTINTSTS_MSG) {
+		idt_msg_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	/* Handle doorbell interrupts */
+	if (ntint_sts & IDT_NTINTSTS_DBELL) {
+		idt_db_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	/* Handle switch event interrupts */
+	if (ntint_sts & IDT_NTINTSTS_SEVENT) {
+		idt_se_isr(ndev, ntint_sts);
+		handled = true;
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts);
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*===========================================================================
+ * 9. NTB hardware driver initialization
+ *===========================================================================
+ */
+
+/*
+ * NTB API operations
+ */
+static const struct ntb_dev_ops idt_ntb_ops = {
+	.port_number = idt_ntb_port_number,
+	.peer_port_count = idt_ntb_peer_port_count,
+	.peer_port_number = idt_ntb_peer_port_number,
+	.peer_port_idx = idt_ntb_peer_port_idx,
+	.link_is_up = idt_ntb_link_is_up,
+	.link_enable = idt_ntb_link_enable,
+	.link_disable = idt_ntb_link_disable,
+	.mw_count = idt_ntb_mw_count,
+	.mw_get_align = idt_ntb_mw_get_align,
+	.peer_mw_count = idt_ntb_peer_mw_count,
+	.peer_mw_get_addr = idt_ntb_peer_mw_get_addr,
+	.peer_mw_set_trans = idt_ntb_peer_mw_set_trans,
+	.peer_mw_clear_trans = idt_ntb_peer_mw_clear_trans,
+	.db_valid_mask = idt_ntb_db_valid_mask,
+	.db_read = idt_ntb_db_read,
+	.db_clear = idt_ntb_db_clear,
+	.db_read_mask = idt_ntb_db_read_mask,
+	.db_set_mask = idt_ntb_db_set_mask,
+	.db_clear_mask = idt_ntb_db_clear_mask,
+	.peer_db_set = idt_ntb_peer_db_set,
+	.msg_count = idt_ntb_msg_count,
+	.msg_inbits = idt_ntb_msg_inbits,
+	.msg_outbits = idt_ntb_msg_outbits,
+	.msg_read_sts = idt_ntb_msg_read_sts,
+	.msg_clear_sts = idt_ntb_msg_clear_sts,
+	.msg_set_mask = idt_ntb_msg_set_mask,
+	.msg_clear_mask = idt_ntb_msg_clear_mask,
+	.msg_read = idt_ntb_msg_read,
+	.peer_msg_write = idt_ntb_peer_msg_write
+};
+
+/*
+ * idt_register_device() - register the IDT NTB device
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_register_device(struct idt_ntb_dev *ndev)
+{
+	int ret;
+
+	/* Initialize the rest of the NTB device structure and register it */
+	ndev->ntb.ops = &idt_ntb_ops;
+	ndev->ntb.topo = NTB_TOPO_SWITCH;
+
+	ret = ntb_register_device(&ndev->ntb);
+	if (ret != 0) {
+		dev_err(&ndev->ntb.pdev->dev, "Failed to register NTB device");
+		return ret;
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device successfully registered");
+
+	return 0;
+}
+
+/*
+ * idt_unregister_device() - unregister the IDT NTB device
+ * @ndev: IDT NTB hardware driver descriptor
+ */
+static void idt_unregister_device(struct idt_ntb_dev *ndev)
+{
+	/* Just unregister the NTB device */
+	ntb_unregister_device(&ndev->ntb);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device unregistered");
+}
+
+/*=============================================================================
+ * 10. DebugFS node initialization
+ *=============================================================================
+ */
+
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+				   size_t count, loff_t *offp);
+
+/*
+ * Driver DebugFS info file operations
+ */
+static const struct file_operations idt_dbgfs_info_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = idt_dbgfs_info_read
+};
+
+/*
+ * idt_dbgfs_info_read() - DebugFS read info node callback
+ * @filp: File node descriptor.
+ * @ubuf: User-space buffer to put the data to
+ * @count: Size of the buffer
+ * @offp: Offset within the buffer
+ */
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+				   size_t count, loff_t *offp)
+{
+	struct idt_ntb_dev *ndev = filp->private_data;
+	unsigned char idx, pidx, cnt;
+	unsigned long irqflags, mdeg;
+	ssize_t ret = 0, off = 0;
+	enum ntb_speed speed;
+	enum ntb_width width;
+	char *strbuf;
+	size_t size;
+	u32 data;
+
+	/* Let's limit the buffer size the way the Intel/AMD drivers do */
+	size = min_t(size_t, count, 0x1000U);
+
+	/* Allocate the memory for the buffer */
+	strbuf = kmalloc(size, GFP_KERNEL);
+	if (strbuf == NULL)
+		return -ENOMEM;
+
+	/* Put the data into the string buffer */
+	off += scnprintf(strbuf + off, size - off,
+		"\n\t\tIDT NTB device Information:\n\n");
+
+	/* General local device configurations */
+	off += scnprintf(strbuf + off, size - off,
+		"Local Port %hhu, Partition %hhu\n", ndev->port, ndev->part);
+
+	/* Peer ports information */
+	off += scnprintf(strbuf + off, size - off, "Peers:\n");
+	for (idx = 0; idx < ndev->peer_cnt; idx++) {
+		off += scnprintf(strbuf + off, size - off,
+			"\t%hhu. Port %hhu, Partition %hhu\n",
+			idx, ndev->peers[idx].port, ndev->peers[idx].part);
+	}
+
+	/* Links status */
+	data = idt_ntb_link_is_up(&ndev->ntb, &speed, &width);
+	off += scnprintf(strbuf + off, size - off,
+		"NTB link status\t- 0x%08x, ", data);
+	off += scnprintf(strbuf + off, size - off, "PCIe Gen %d x%d lanes\n",
+		speed, width);
+
+	/* Mapping table entries */
+	off += scnprintf(strbuf + off, size - off, "NTB Mapping Table:\n");
+	for (idx = 0; idx < IDT_MTBL_ENTRY_CNT; idx++) {
+		spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_NTMTBLADDR, idx);
+		data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+		spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+		/* Print valid entries only */
+		if (data & IDT_NTMTBLDATA_VALID) {
+			off += scnprintf(strbuf + off, size - off,
+				"\t%hhu.
Partition %d, Requester ID 0x%04x\n", + idx, GET_FIELD(NTMTBLDATA_PART, data), + GET_FIELD(NTMTBLDATA_REQID, data)); + } + } + off += scnprintf(strbuf + off, size - off, "\n"); + + /* Outbound memory windows information */ + off += scnprintf(strbuf + off, size - off, + "Outbound Memory Windows:\n"); + for (idx = 0; idx < ndev->mw_cnt; idx += cnt) { + data = ndev->mws[idx].type; + cnt = idt_get_mw_count(data); + + /* Print Memory Window information */ + if (data == IDT_MW_DIR) + off += scnprintf(strbuf + off, size - off, + "\t%hhu.\t", idx); + else + off += scnprintf(strbuf + off, size - off, + "\t%hhu-%d.\t", idx, idx + cnt - 1); + + off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ", + idt_get_mw_name(data), ndev->mws[idx].bar); + + off += scnprintf(strbuf + off, size - off, + "Address align 0x%08llx, ", ndev->mws[idx].addr_align); + + off += scnprintf(strbuf + off, size - off, + "Size align 0x%08llx, Size max %llu\n", + ndev->mws[idx].size_align, ndev->mws[idx].size_max); + } + + /* Inbound memory windows information */ + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + off += scnprintf(strbuf + off, size - off, + "Inbound Memory Windows for peer %hhu (Port %hhu):\n", + pidx, ndev->peers[pidx].port); + + /* Print Memory Windows information */ + for (idx = 0; idx < ndev->peers[pidx].mw_cnt; idx += cnt) { + data = ndev->peers[pidx].mws[idx].type; + cnt = idt_get_mw_count(data); + + if (data == IDT_MW_DIR) + off += scnprintf(strbuf + off, size - off, + "\t%hhu.\t", idx); + else + off += scnprintf(strbuf + off, size - off, + "\t%hhu-%d.\t", idx, idx + cnt - 1); + + off += scnprintf(strbuf + off, size - off, + "%s BAR%hhu, ", idt_get_mw_name(data), + ndev->peers[pidx].mws[idx].bar); + + off += scnprintf(strbuf + off, size - off, + "Address align 0x%08llx, ", + ndev->peers[pidx].mws[idx].addr_align); + + off += scnprintf(strbuf + off, size - off, + "Size align 0x%08llx, Size max %llu\n", + ndev->peers[pidx].mws[idx].size_align, + ndev->peers[pidx].mws[idx].size_max); + } + } + off += scnprintf(strbuf + off, size - off, "\n"); + + /* Doorbell information */ + data = idt_sw_read(ndev, IDT_SW_GDBELLSTS); + off += scnprintf(strbuf + off, size - off, + "Global Doorbell state\t- 0x%08x\n", data); + data = idt_ntb_db_read(&ndev->ntb); + off += scnprintf(strbuf + off, size - off, + "Local Doorbell state\t- 0x%08x\n", data); + data = idt_nt_read(ndev, IDT_NT_INDBELLMSK); + off += scnprintf(strbuf + off, size - off, + "Local Doorbell mask\t- 0x%08x\n", data); + off += scnprintf(strbuf + off, size - off, "\n"); + + /* Messaging information */ + off += scnprintf(strbuf + off, size - off, + "Message event valid\t- 0x%08x\n", IDT_MSG_MASK); + data = idt_ntb_msg_read_sts(&ndev->ntb); + off += scnprintf(strbuf + off, size - off, + "Message event status\t- 0x%08x\n", data); + data = idt_nt_read(ndev, IDT_NT_MSGSTSMSK); + off += scnprintf(strbuf + off, size - off, + "Message event mask\t- 0x%08x\n", data); + off += scnprintf(strbuf + off, size - off, + "Message data:\n"); + for (idx = 0; idx < IDT_MSG_CNT; idx++) { + int src; + data = idt_ntb_msg_read(&ndev->ntb, &src, idx); + off += scnprintf(strbuf + off, size - off, + "\t%hhu. 
0x%08x from peer %d (Port %hhu)\n",
+			idx, data, src, ndev->peers[src].port);
+	}
+	off += scnprintf(strbuf + off, size - off, "\n");
+
+	/* Current temperature */
+	idt_read_temp(ndev, IDT_TEMP_CUR, &mdeg);
+	off += scnprintf(strbuf + off, size - off,
+		"Switch temperature\t\t- %hhd.%hhuC\n",
+		idt_get_deg(mdeg), idt_get_deg_frac(mdeg));
+
+	/* Copy the buffer to the User Space */
+	ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
+	kfree(strbuf);
+
+	return ret;
+}
+
+/*
+ * idt_init_dbgfs() - initialize the DebugFS node
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
+{
+	char devname[64];
+
+	/* If the top directory is not created then do nothing */
+	if (IS_ERR_OR_NULL(dbgfs_topdir)) {
+		dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
+		return PTR_ERR_OR_ZERO(dbgfs_topdir);
+	}
+
+	/* Create the info file node */
+	snprintf(devname, 64, "info:%s", pci_name(ndev->ntb.pdev));
+	ndev->dbgfs_info = debugfs_create_file(devname, 0400, dbgfs_topdir,
+		ndev, &idt_dbgfs_info_ops);
+	if (IS_ERR(ndev->dbgfs_info)) {
+		dev_dbg(&ndev->ntb.pdev->dev, "Failed to create DebugFS node");
+		return PTR_ERR(ndev->dbgfs_info);
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node created");
+
+	return 0;
+}
+
+/*
+ * idt_deinit_dbgfs() - deinitialize the DebugFS node
+ * @ndev: IDT NTB hardware driver descriptor
+ *
+ * Just discard the info node from DebugFS
+ */
+static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
+{
+	debugfs_remove(ndev->dbgfs_info);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node discarded");
+}
+
+/*=============================================================================
+ * 11. Basic PCIe device initialization
+ *=============================================================================
+ */
+
+/*
+ * idt_check_setup() - check whether the IDT PCIe-switch is properly
+ *		       pre-initialized
+ * @pdev: Pointer to the PCI device descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_check_setup(struct pci_dev *pdev)
+{
+	u32 data;
+	int ret;
+
+	/* Read the BARSETUP0 */
+	ret = pci_read_config_dword(pdev, IDT_NT_BARSETUP0, &data);
+	if (ret != 0) {
+		dev_err(&pdev->dev,
+			"Failed to read BARSETUP0 config register");
+		return ret;
+	}
+
+	/* Check whether the BAR0 register is enabled and maps the config space */
+	if (!(data & IDT_BARSETUP_EN) || !(data & IDT_BARSETUP_MODE_CFG)) {
+		dev_err(&pdev->dev, "BAR0 doesn't map config space");
+		return -EINVAL;
+	}
+
+	/* The configuration space BAR0 must have a certain size */
+	if ((data & IDT_BARSETUP_SIZE_MASK) != IDT_BARSETUP_SIZE_CFG) {
+		dev_err(&pdev->dev, "Invalid size of config space");
+		return -EINVAL;
+	}
+
+	dev_dbg(&pdev->dev, "NTB device pre-initialized correctly");
+
+	return 0;
+}
+
+/*
+ * idt_create_dev() - create the IDT PCIe-switch driver descriptor
+ * @pdev: Pointer to the PCI device descriptor
+ * @id: IDT PCIe-device configuration
+ *
+ * It just allocates memory for the IDT PCIe-switch device structure and
+ * initializes some commonly used fields.
+ *
+ * No release method is needed, since managed device resources are used for
+ * the memory allocation.
+ *
+ * Return: pointer to the descriptor, otherwise a negative error number.
+ */ +static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct idt_ntb_dev *ndev; + + /* Allocate memory for the IDT PCIe-device descriptor */ + ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL); + if (!ndev) { + dev_err(&pdev->dev, "Memory allocation failed for descriptor"); + return ERR_PTR(-ENOMEM); + } + + /* Save the IDT PCIe-switch ports configuration */ + ndev->swcfg = (struct idt_89hpes_cfg *)id->driver_data; + /* Save the PCI-device pointer inside the NTB device structure */ + ndev->ntb.pdev = pdev; + + /* Initialize spin locker of Doorbell, Message and GASA registers */ + spin_lock_init(&ndev->db_mask_lock); + spin_lock_init(&ndev->msg_mask_lock); + spin_lock_init(&ndev->gasa_lock); + + dev_info(&pdev->dev, "IDT %s discovered", ndev->swcfg->name); + + dev_dbg(&pdev->dev, "NTB device descriptor created"); + + return ndev; +} + +/* + * idt_init_pci() - initialize the basic PCI-related subsystem + * @ndev: Pointer to the IDT PCIe-switch driver descriptor + * + * Managed device resources will be freed automatically in case of failure or + * driver detachment. + * + * Return: zero on success, otherwise negative error number. + */ +static int idt_init_pci(struct idt_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + int ret; + + /* Initialize the bit mask of PCI/NTB DMA */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret != 0) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to set DMA bit mask\n"); + return ret; + } + dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n"); + } + + /* + * The PCI core enables device error reporting. It's not critical to + * have AER disabled in the kernel. + * + * Cleanup nonfatal error status before getting to init. + */ + pci_aer_clear_nonfatal_status(pdev); + + /* First enable the PCI device */ + ret = pcim_enable_device(pdev); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to enable PCIe device\n"); + return ret; + } + + /* + * Enable the bus mastering, which effectively enables MSI IRQs and + * Request TLPs translation + */ + pci_set_master(pdev); + + /* Request all BARs resources and map BAR0 only */ + ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request resources\n"); + goto err_clear_master; + } + + /* Retrieve virtual address of BAR0 - PCI configuration space */ + ndev->cfgspc = pcim_iomap_table(pdev)[0]; + + /* Put the IDT driver data pointer to the PCI-device private pointer */ + pci_set_drvdata(pdev, ndev); + + dev_dbg(&pdev->dev, "NT-function PCIe interface initialized"); + + return 0; + +err_clear_master: + pci_clear_master(pdev); + + return ret; +} + +/* + * idt_deinit_pci() - deinitialize the basic PCI-related subsystem + * @ndev: Pointer to the IDT PCIe-switch driver descriptor + * + * Managed resources will be freed on the driver detachment + */ +static void idt_deinit_pci(struct idt_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + + /* Clean up the PCI-device private data pointer */ + pci_set_drvdata(pdev, NULL); + + /* Clear the bus master disabling the Request TLPs translation */ + pci_clear_master(pdev); + + dev_dbg(&pdev->dev, "NT-function PCIe interface cleared"); +} + +/*=========================================================================== + * 12. 
PCI bus callback functions
+ *===========================================================================
+ */
+
+/*
+ * idt_pci_probe() - PCI device probe callback
+ * @pdev: Pointer to the PCI device structure
+ * @id: PCIe device custom descriptor
+ *
+ * Return: zero on success, otherwise a negative error number
+ */
+static int idt_pci_probe(struct pci_dev *pdev,
+			 const struct pci_device_id *id)
+{
+	struct idt_ntb_dev *ndev;
+	int ret;
+
+	/* Check whether the IDT PCIe-switch is properly pre-initialized */
+	ret = idt_check_setup(pdev);
+	if (ret != 0)
+		return ret;
+
+	/* Allocate the memory for the IDT NTB device data */
+	ndev = idt_create_dev(pdev, id);
+	if (IS_ERR(ndev))
+		return PTR_ERR(ndev);
+
+	/* Initialize the basic PCI subsystem of the device */
+	ret = idt_init_pci(ndev);
+	if (ret != 0)
+		return ret;
+
+	/* Scan the ports of the IDT PCIe-switch */
+	(void)idt_scan_ports(ndev);
+
+	/* Initialize the NTB link events subsystem */
+	idt_init_link(ndev);
+
+	/* Initialize the MWs subsystem */
+	ret = idt_init_mws(ndev);
+	if (ret != 0)
+		goto err_deinit_link;
+
+	/* Initialize the Messaging subsystem */
+	idt_init_msg(ndev);
+
+	/* Initialize the hwmon interface */
+	idt_init_temp(ndev);
+
+	/* Initialize the IDT interrupts handler */
+	ret = idt_init_isr(ndev);
+	if (ret != 0)
+		goto err_deinit_link;
+
+	/* Register the IDT NTB device on the NTB bus */
+	ret = idt_register_device(ndev);
+	if (ret != 0)
+		goto err_deinit_isr;
+
+	/* Initialize the DebugFS info node */
+	(void)idt_init_dbgfs(ndev);
+
+	/* The IDT PCIe-switch NTB driver is finally initialized */
+	dev_info(&pdev->dev, "IDT NTB device is ready");
+
+	/* May the force be with us... */
+	return 0;
+
+err_deinit_isr:
+	idt_deinit_isr(ndev);
+err_deinit_link:
+	idt_deinit_link(ndev);
+	idt_deinit_pci(ndev);
+
+	return ret;
+}
+
+/*
+ * idt_pci_remove() - PCI device remove callback
+ * @pdev: Pointer to the PCI device structure
+ */
+static void idt_pci_remove(struct pci_dev *pdev)
+{
+	struct idt_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	/* Deinit the DebugFS node */
+	idt_deinit_dbgfs(ndev);
+
+	/* Unregister the NTB device */
+	idt_unregister_device(ndev);
+
+	/* Stop the interrupts handling */
+	idt_deinit_isr(ndev);
+
+	/* Deinitialize the link event subsystem */
+	idt_deinit_link(ndev);
+
+	/* Deinit the basic PCI subsystem */
+	idt_deinit_pci(ndev);
+
+	/* The IDT PCIe-switch NTB driver is finally deinitialized */
+	dev_info(&pdev->dev, "IDT NTB device is removed");
+
+	/* Sayonara...
*/ +} + +/* + * IDT PCIe-switch models ports configuration structures + */ +static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { + .name = "89HPES24NT6AG2", + .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12} +}; +static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { + .name = "89HPES32NT8AG2", + .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} +}; +static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { + .name = "89HPES32NT8BG2", + .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} +}; +static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { + .name = "89HPES12NT12G2", + .port_cnt = 3, .ports = {0, 8, 16} +}; +static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { + .name = "89HPES16NT16G2", + .port_cnt = 4, .ports = {0, 8, 12, 16} +}; +static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { + .name = "89HPES24NT24G2", + .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} +}; +static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { + .name = "89HPES32NT24AG2", + .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} +}; +static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { + .name = "89HPES32NT24BG2", + .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} +}; + +/* + * PCI-ids table of the supported IDT PCIe-switch devices + */ +static const struct pci_device_id idt_pci_tbl[] = { + {IDT_PCI_DEVICE_IDS(89HPES24NT6AG2, idt_89hpes24nt6ag2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES32NT8AG2, idt_89hpes32nt8ag2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES32NT8BG2, idt_89hpes32nt8bg2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES12NT12G2, idt_89hpes12nt12g2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES16NT16G2, idt_89hpes16nt16g2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES24NT24G2, idt_89hpes24nt24g2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES32NT24AG2, idt_89hpes32nt24ag2_config)}, + {IDT_PCI_DEVICE_IDS(89HPES32NT24BG2, idt_89hpes32nt24bg2_config)}, + {0} +}; +MODULE_DEVICE_TABLE(pci, idt_pci_tbl); + +/* + * IDT PCIe-switch NT-function device driver structure definition + */ +static struct pci_driver idt_pci_driver = { + .name = KBUILD_MODNAME, + .probe = idt_pci_probe, + .remove = idt_pci_remove, + .id_table = idt_pci_tbl, +}; + +static int __init idt_pci_driver_init(void) +{ + int ret; + pr_info("%s %s\n", NTB_DESC, NTB_VER); + + /* Create the top DebugFS directory if the FS is initialized */ + if (debugfs_initialized()) + dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + /* Register the NTB hardware driver to handle the PCI device */ + ret = pci_register_driver(&idt_pci_driver); + if (ret) + debugfs_remove_recursive(dbgfs_topdir); + + return ret; +} +module_init(idt_pci_driver_init); + +static void __exit idt_pci_driver_exit(void) +{ + /* Unregister the NTB hardware driver */ + pci_unregister_driver(&idt_pci_driver); + + /* Discard the top DebugFS directory */ + debugfs_remove_recursive(dbgfs_topdir); +} +module_exit(idt_pci_driver_exit); + diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.h b/drivers/ntb/hw/idt/ntb_hw_idt.h new file mode 100644 index 0000000000..2f1aa121b0 --- /dev/null +++ b/drivers/ntb/hw/idt/ntb_hw_idt.h @@ -0,0 +1,1226 @@ +/* + * This file is provided under a GPLv2 license. When using or + * redistributing this file, you may do so under that license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, one can be found at http://www.gnu.org/licenses/.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * IDT PCIe-switch NTB Linux driver
+ *
+ * Contact Information:
+ * Serge Semin ,
+ */
+
+#ifndef NTB_HW_IDT_H
+#define NTB_HW_IDT_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * This macro is used to create a struct pci_device_id that matches
+ * the supported IDT PCIe-switches
+ * @devname: Capitalized name of the particular device
+ * @data: Variable passed to the driver of the particular device
+ */
+#define IDT_PCI_DEVICE_IDS(devname, data) \
+	.vendor = PCI_VENDOR_ID_IDT, .device = PCI_DEVICE_ID_IDT_##devname, \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+	.class = (PCI_CLASS_BRIDGE_OTHER << 8), .class_mask = (0xFFFF00), \
+	.driver_data = (kernel_ulong_t)&data
+
+/*
+ * IDT PCIe-switches device IDs
+ */
+#define PCI_DEVICE_ID_IDT_89HPES24NT6AG2 0x8091
+#define PCI_DEVICE_ID_IDT_89HPES32NT8AG2 0x808F
+#define PCI_DEVICE_ID_IDT_89HPES32NT8BG2 0x8088
+#define PCI_DEVICE_ID_IDT_89HPES12NT12G2 0x8092
+#define PCI_DEVICE_ID_IDT_89HPES16NT16G2 0x8090
+#define PCI_DEVICE_ID_IDT_89HPES24NT24G2 0x808E
+#define PCI_DEVICE_ID_IDT_89HPES32NT24AG2 0x808C
+#define PCI_DEVICE_ID_IDT_89HPES32NT24BG2 0x808A
+
+/*
+ * NT-function Configuration Space registers
+ * NOTE 1) The IDT PCIe-switch internal data is little-endian,
+ *	   so this must be taken into account in the driver
+ *	   internals.
+ *	2) Additionally, the registers should be accessed either
+ *	   with byte-enables corresponding to their native size or
+ *	   with the size of one DWORD.
+ *
+ * So to simplify the driver code, only DWORD-sized read/write
+ * operations are utilized.
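+ *
+ * For example, fetching the 16-bit PCIe link status means reading the whole
+ * containing DWORD and extracting the upper word (a sketch based on the
+ * idt_nt_read() helper from ntb_hw_idt.c):
+ *
+ *	u32 lctlsts = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS);
+ *	u16 lsts = lctlsts >> 16;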
+ */
+/* PCI Express Configuration Space */
+/* PCI Express command/status register (DWORD) */
+#define IDT_NT_PCICMDSTS		0x00004U
+/* PCI Express Device Capabilities (DWORD) */
+#define IDT_NT_PCIEDCAP			0x00044U
+/* PCI Express Device Control/Status (WORD+WORD) */
+#define IDT_NT_PCIEDCTLSTS		0x00048U
+/* PCI Express Link Capabilities (DWORD) */
+#define IDT_NT_PCIELCAP			0x0004CU
+/* PCI Express Link Control/Status (WORD+WORD) */
+#define IDT_NT_PCIELCTLSTS		0x00050U
+/* PCI Express Device Capabilities 2 (DWORD) */
+#define IDT_NT_PCIEDCAP2		0x00064U
+/* PCI Express Device Control 2 (WORD+WORD) */
+#define IDT_NT_PCIEDCTL2		0x00068U
+/* PCI Power Management Control and Status (DWORD) */
+#define IDT_NT_PMCSR			0x000C4U
+/*==========================================*/
+/* IDT Proprietary NT-port-specific registers */
+/* NT-function main control registers */
+/* NT Endpoint Control (DWORD) */
+#define IDT_NT_NTCTL			0x00400U
+/* NT Endpoint Interrupt Status/Mask (DWORD) */
+#define IDT_NT_NTINTSTS			0x00404U
+#define IDT_NT_NTINTMSK			0x00408U
+/* NT Endpoint Signal Data (DWORD) */
+#define IDT_NT_NTSDATA			0x0040CU
+/* NT Endpoint Global Signal (DWORD) */
+#define IDT_NT_NTGSIGNAL		0x00410U
+/* Internal Error Reporting Mask 0/1 (DWORD) */
+#define IDT_NT_NTIERRORMSK0		0x00414U
+#define IDT_NT_NTIERRORMSK1		0x00418U
+/* Doorbell registers */
+/* NT Outbound Doorbell Set (DWORD) */
+#define IDT_NT_OUTDBELLSET		0x00420U
+/* NT Inbound Doorbell Status/Mask (DWORD) */
+#define IDT_NT_INDBELLSTS		0x00428U
+#define IDT_NT_INDBELLMSK		0x0042CU
+/* Message registers */
+/* Outbound Message N (DWORD) */
+#define IDT_NT_OUTMSG0			0x00430U
+#define IDT_NT_OUTMSG1			0x00434U
+#define IDT_NT_OUTMSG2			0x00438U
+#define IDT_NT_OUTMSG3			0x0043CU
+/* Inbound Message N (DWORD) */
+#define IDT_NT_INMSG0			0x00440U
+#define IDT_NT_INMSG1			0x00444U
+#define IDT_NT_INMSG2			0x00448U
+#define IDT_NT_INMSG3			0x0044CU
+/* Inbound Message Source N (DWORD) */
+#define IDT_NT_INMSGSRC0		0x00450U
+#define IDT_NT_INMSGSRC1		0x00454U
+#define IDT_NT_INMSGSRC2		0x00458U
+#define IDT_NT_INMSGSRC3		0x0045CU
+/* Message Status (DWORD) */
+#define IDT_NT_MSGSTS			0x00460U
+/* Message Status Mask (DWORD) */
+#define IDT_NT_MSGSTSMSK		0x00464U
+/* BAR-setup registers */
+/* BAR N Setup/Limit Address/Lower and Upper Translated Base Address (DWORD) */
+#define IDT_NT_BARSETUP0		0x00470U
+#define IDT_NT_BARLIMIT0		0x00474U
+#define IDT_NT_BARLTBASE0		0x00478U
+#define IDT_NT_BARUTBASE0		0x0047CU
+#define IDT_NT_BARSETUP1		0x00480U
+#define IDT_NT_BARLIMIT1		0x00484U
+#define IDT_NT_BARLTBASE1		0x00488U
+#define IDT_NT_BARUTBASE1		0x0048CU
+#define IDT_NT_BARSETUP2		0x00490U
+#define IDT_NT_BARLIMIT2		0x00494U
+#define IDT_NT_BARLTBASE2		0x00498U
+#define IDT_NT_BARUTBASE2		0x0049CU
+#define IDT_NT_BARSETUP3		0x004A0U
+#define IDT_NT_BARLIMIT3		0x004A4U
+#define IDT_NT_BARLTBASE3		0x004A8U
+#define IDT_NT_BARUTBASE3		0x004ACU
+#define IDT_NT_BARSETUP4		0x004B0U
+#define IDT_NT_BARLIMIT4		0x004B4U
+#define IDT_NT_BARLTBASE4		0x004B8U
+#define IDT_NT_BARUTBASE4		0x004BCU
+#define IDT_NT_BARSETUP5		0x004C0U
+#define IDT_NT_BARLIMIT5		0x004C4U
+#define IDT_NT_BARLTBASE5		0x004C8U
+#define IDT_NT_BARUTBASE5		0x004CCU
+/* NT mapping table registers */
+/* NT Mapping Table Address/Status/Data (DWORD) */
+#define IDT_NT_NTMTBLADDR		0x004D0U
+#define IDT_NT_NTMTBLSTS		0x004D4U
+#define IDT_NT_NTMTBLDATA		0x004D8U
+/* Requester ID (Bus:Device:Function) Capture (DWORD) */
+#define IDT_NT_REQIDCAP			0x004DCU
+/* Memory Windows Lookup table registers */
+/* Lookup Table Offset/Lower, Middle and Upper data (DWORD) */
+#define IDT_NT_LUTOFFSET		0x004E0U
+#define IDT_NT_LUTLDATA			0x004E4U
+#define IDT_NT_LUTMDATA			0x004E8U
+#define IDT_NT_LUTUDATA			0x004ECU
+/* NT Endpoint Uncorrectable/Correctable Errors Emulation registers (DWORD) */
+#define IDT_NT_NTUEEM			0x004F0U
+#define IDT_NT_NTCEEM			0x004F4U
+/* Global Address Space Access/Data registers (DWORD) */
+#define IDT_NT_GASAADDR			0x00FF8U
+#define IDT_NT_GASADATA			0x00FFCU
+
+/*
+ * IDT PCIe-switch Global Configuration and Status registers
+ */
+/* Port N Configuration register in global space */
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP0_PCIECMDSTS		0x01004U
+#define IDT_SW_NTP0_PCIELCTLSTS		0x01050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP0_NTCTL		0x01400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP0_BARSETUP0		0x01470U
+#define IDT_SW_NTP0_BARLIMIT0		0x01474U
+#define IDT_SW_NTP0_BARLTBASE0		0x01478U
+#define IDT_SW_NTP0_BARUTBASE0		0x0147CU
+#define IDT_SW_NTP0_BARSETUP1		0x01480U
+#define IDT_SW_NTP0_BARLIMIT1		0x01484U
+#define IDT_SW_NTP0_BARLTBASE1		0x01488U
+#define IDT_SW_NTP0_BARUTBASE1		0x0148CU
+#define IDT_SW_NTP0_BARSETUP2		0x01490U
+#define IDT_SW_NTP0_BARLIMIT2		0x01494U
+#define IDT_SW_NTP0_BARLTBASE2		0x01498U
+#define IDT_SW_NTP0_BARUTBASE2		0x0149CU
+#define IDT_SW_NTP0_BARSETUP3		0x014A0U
+#define IDT_SW_NTP0_BARLIMIT3		0x014A4U
+#define IDT_SW_NTP0_BARLTBASE3		0x014A8U
+#define IDT_SW_NTP0_BARUTBASE3		0x014ACU
+#define IDT_SW_NTP0_BARSETUP4		0x014B0U
+#define IDT_SW_NTP0_BARLIMIT4		0x014B4U
+#define IDT_SW_NTP0_BARLTBASE4		0x014B8U
+#define IDT_SW_NTP0_BARUTBASE4		0x014BCU
+#define IDT_SW_NTP0_BARSETUP5		0x014C0U
+#define IDT_SW_NTP0_BARLIMIT5		0x014C4U
+#define IDT_SW_NTP0_BARLTBASE5		0x014C8U
+#define IDT_SW_NTP0_BARUTBASE5		0x014CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP2_PCIECMDSTS		0x05004U
+#define IDT_SW_NTP2_PCIELCTLSTS		0x05050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP2_NTCTL		0x05400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP2_BARSETUP0		0x05470U
+#define IDT_SW_NTP2_BARLIMIT0		0x05474U
+#define IDT_SW_NTP2_BARLTBASE0		0x05478U
+#define IDT_SW_NTP2_BARUTBASE0		0x0547CU
+#define IDT_SW_NTP2_BARSETUP1		0x05480U
+#define IDT_SW_NTP2_BARLIMIT1		0x05484U
+#define IDT_SW_NTP2_BARLTBASE1		0x05488U
+#define IDT_SW_NTP2_BARUTBASE1		0x0548CU
+#define IDT_SW_NTP2_BARSETUP2		0x05490U
+#define IDT_SW_NTP2_BARLIMIT2		0x05494U
+#define IDT_SW_NTP2_BARLTBASE2		0x05498U
+#define IDT_SW_NTP2_BARUTBASE2		0x0549CU
+#define IDT_SW_NTP2_BARSETUP3		0x054A0U
+#define IDT_SW_NTP2_BARLIMIT3		0x054A4U
+#define IDT_SW_NTP2_BARLTBASE3		0x054A8U
+#define IDT_SW_NTP2_BARUTBASE3		0x054ACU
+#define IDT_SW_NTP2_BARSETUP4		0x054B0U
+#define IDT_SW_NTP2_BARLIMIT4		0x054B4U
+#define IDT_SW_NTP2_BARLTBASE4		0x054B8U
+#define IDT_SW_NTP2_BARUTBASE4		0x054BCU
+#define IDT_SW_NTP2_BARSETUP5		0x054C0U
+#define IDT_SW_NTP2_BARLIMIT5		0x054C4U
+#define IDT_SW_NTP2_BARLTBASE5		0x054C8U
+#define IDT_SW_NTP2_BARUTBASE5		0x054CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP4_PCIECMDSTS		0x09004U
+#define IDT_SW_NTP4_PCIELCTLSTS		0x09050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP4_NTCTL		0x09400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP4_BARSETUP0		0x09470U
+#define IDT_SW_NTP4_BARLIMIT0		0x09474U
+#define IDT_SW_NTP4_BARLTBASE0		0x09478U
+#define IDT_SW_NTP4_BARUTBASE0		0x0947CU
+#define
IDT_SW_NTP4_BARSETUP1 0x09480U +#define IDT_SW_NTP4_BARLIMIT1 0x09484U +#define IDT_SW_NTP4_BARLTBASE1 0x09488U +#define IDT_SW_NTP4_BARUTBASE1 0x0948CU +#define IDT_SW_NTP4_BARSETUP2 0x09490U +#define IDT_SW_NTP4_BARLIMIT2 0x09494U +#define IDT_SW_NTP4_BARLTBASE2 0x09498U +#define IDT_SW_NTP4_BARUTBASE2 0x0949CU +#define IDT_SW_NTP4_BARSETUP3 0x094A0U +#define IDT_SW_NTP4_BARLIMIT3 0x094A4U +#define IDT_SW_NTP4_BARLTBASE3 0x094A8U +#define IDT_SW_NTP4_BARUTBASE3 0x094ACU +#define IDT_SW_NTP4_BARSETUP4 0x094B0U +#define IDT_SW_NTP4_BARLIMIT4 0x094B4U +#define IDT_SW_NTP4_BARLTBASE4 0x094B8U +#define IDT_SW_NTP4_BARUTBASE4 0x094BCU +#define IDT_SW_NTP4_BARSETUP5 0x094C0U +#define IDT_SW_NTP4_BARLIMIT5 0x094C4U +#define IDT_SW_NTP4_BARLTBASE5 0x094C8U +#define IDT_SW_NTP4_BARUTBASE5 0x094CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP6_PCIECMDSTS 0x0D004U +#define IDT_SW_NTP6_PCIELCTLSTS 0x0D050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP6_NTCTL 0x0D400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP6_BARSETUP0 0x0D470U +#define IDT_SW_NTP6_BARLIMIT0 0x0D474U +#define IDT_SW_NTP6_BARLTBASE0 0x0D478U +#define IDT_SW_NTP6_BARUTBASE0 0x0D47CU +#define IDT_SW_NTP6_BARSETUP1 0x0D480U +#define IDT_SW_NTP6_BARLIMIT1 0x0D484U +#define IDT_SW_NTP6_BARLTBASE1 0x0D488U +#define IDT_SW_NTP6_BARUTBASE1 0x0D48CU +#define IDT_SW_NTP6_BARSETUP2 0x0D490U +#define IDT_SW_NTP6_BARLIMIT2 0x0D494U +#define IDT_SW_NTP6_BARLTBASE2 0x0D498U +#define IDT_SW_NTP6_BARUTBASE2 0x0D49CU +#define IDT_SW_NTP6_BARSETUP3 0x0D4A0U +#define IDT_SW_NTP6_BARLIMIT3 0x0D4A4U +#define IDT_SW_NTP6_BARLTBASE3 0x0D4A8U +#define IDT_SW_NTP6_BARUTBASE3 0x0D4ACU +#define IDT_SW_NTP6_BARSETUP4 0x0D4B0U +#define IDT_SW_NTP6_BARLIMIT4 0x0D4B4U +#define IDT_SW_NTP6_BARLTBASE4 0x0D4B8U +#define IDT_SW_NTP6_BARUTBASE4 0x0D4BCU +#define IDT_SW_NTP6_BARSETUP5 0x0D4C0U +#define IDT_SW_NTP6_BARLIMIT5 0x0D4C4U +#define IDT_SW_NTP6_BARLTBASE5 0x0D4C8U +#define IDT_SW_NTP6_BARUTBASE5 0x0D4CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP8_PCIECMDSTS 0x11004U +#define IDT_SW_NTP8_PCIELCTLSTS 0x11050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP8_NTCTL 0x11400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP8_BARSETUP0 0x11470U +#define IDT_SW_NTP8_BARLIMIT0 0x11474U +#define IDT_SW_NTP8_BARLTBASE0 0x11478U +#define IDT_SW_NTP8_BARUTBASE0 0x1147CU +#define IDT_SW_NTP8_BARSETUP1 0x11480U +#define IDT_SW_NTP8_BARLIMIT1 0x11484U +#define IDT_SW_NTP8_BARLTBASE1 0x11488U +#define IDT_SW_NTP8_BARUTBASE1 0x1148CU +#define IDT_SW_NTP8_BARSETUP2 0x11490U +#define IDT_SW_NTP8_BARLIMIT2 0x11494U +#define IDT_SW_NTP8_BARLTBASE2 0x11498U +#define IDT_SW_NTP8_BARUTBASE2 0x1149CU +#define IDT_SW_NTP8_BARSETUP3 0x114A0U +#define IDT_SW_NTP8_BARLIMIT3 0x114A4U +#define IDT_SW_NTP8_BARLTBASE3 0x114A8U +#define IDT_SW_NTP8_BARUTBASE3 0x114ACU +#define IDT_SW_NTP8_BARSETUP4 0x114B0U +#define IDT_SW_NTP8_BARLIMIT4 0x114B4U +#define IDT_SW_NTP8_BARLTBASE4 0x114B8U +#define IDT_SW_NTP8_BARUTBASE4 0x114BCU +#define IDT_SW_NTP8_BARSETUP5 0x114C0U +#define IDT_SW_NTP8_BARLIMIT5 0x114C4U +#define IDT_SW_NTP8_BARLTBASE5 0x114C8U +#define IDT_SW_NTP8_BARUTBASE5 0x114CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP12_PCIECMDSTS 0x19004U +#define IDT_SW_NTP12_PCIELCTLSTS 0x19050U +/* NT-function control register (DWORD) */ 
+#define IDT_SW_NTP12_NTCTL 0x19400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP12_BARSETUP0 0x19470U +#define IDT_SW_NTP12_BARLIMIT0 0x19474U +#define IDT_SW_NTP12_BARLTBASE0 0x19478U +#define IDT_SW_NTP12_BARUTBASE0 0x1947CU +#define IDT_SW_NTP12_BARSETUP1 0x19480U +#define IDT_SW_NTP12_BARLIMIT1 0x19484U +#define IDT_SW_NTP12_BARLTBASE1 0x19488U +#define IDT_SW_NTP12_BARUTBASE1 0x1948CU +#define IDT_SW_NTP12_BARSETUP2 0x19490U +#define IDT_SW_NTP12_BARLIMIT2 0x19494U +#define IDT_SW_NTP12_BARLTBASE2 0x19498U +#define IDT_SW_NTP12_BARUTBASE2 0x1949CU +#define IDT_SW_NTP12_BARSETUP3 0x194A0U +#define IDT_SW_NTP12_BARLIMIT3 0x194A4U +#define IDT_SW_NTP12_BARLTBASE3 0x194A8U +#define IDT_SW_NTP12_BARUTBASE3 0x194ACU +#define IDT_SW_NTP12_BARSETUP4 0x194B0U +#define IDT_SW_NTP12_BARLIMIT4 0x194B4U +#define IDT_SW_NTP12_BARLTBASE4 0x194B8U +#define IDT_SW_NTP12_BARUTBASE4 0x194BCU +#define IDT_SW_NTP12_BARSETUP5 0x194C0U +#define IDT_SW_NTP12_BARLIMIT5 0x194C4U +#define IDT_SW_NTP12_BARLTBASE5 0x194C8U +#define IDT_SW_NTP12_BARUTBASE5 0x194CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP16_PCIECMDSTS 0x21004U +#define IDT_SW_NTP16_PCIELCTLSTS 0x21050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP16_NTCTL 0x21400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP16_BARSETUP0 0x21470U +#define IDT_SW_NTP16_BARLIMIT0 0x21474U +#define IDT_SW_NTP16_BARLTBASE0 0x21478U +#define IDT_SW_NTP16_BARUTBASE0 0x2147CU +#define IDT_SW_NTP16_BARSETUP1 0x21480U +#define IDT_SW_NTP16_BARLIMIT1 0x21484U +#define IDT_SW_NTP16_BARLTBASE1 0x21488U +#define IDT_SW_NTP16_BARUTBASE1 0x2148CU +#define IDT_SW_NTP16_BARSETUP2 0x21490U +#define IDT_SW_NTP16_BARLIMIT2 0x21494U +#define IDT_SW_NTP16_BARLTBASE2 0x21498U +#define IDT_SW_NTP16_BARUTBASE2 0x2149CU +#define IDT_SW_NTP16_BARSETUP3 0x214A0U +#define IDT_SW_NTP16_BARLIMIT3 0x214A4U +#define IDT_SW_NTP16_BARLTBASE3 0x214A8U +#define IDT_SW_NTP16_BARUTBASE3 0x214ACU +#define IDT_SW_NTP16_BARSETUP4 0x214B0U +#define IDT_SW_NTP16_BARLIMIT4 0x214B4U +#define IDT_SW_NTP16_BARLTBASE4 0x214B8U +#define IDT_SW_NTP16_BARUTBASE4 0x214BCU +#define IDT_SW_NTP16_BARSETUP5 0x214C0U +#define IDT_SW_NTP16_BARLIMIT5 0x214C4U +#define IDT_SW_NTP16_BARLTBASE5 0x214C8U +#define IDT_SW_NTP16_BARUTBASE5 0x214CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP20_PCIECMDSTS 0x29004U +#define IDT_SW_NTP20_PCIELCTLSTS 0x29050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP20_NTCTL 0x29400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP20_BARSETUP0 0x29470U +#define IDT_SW_NTP20_BARLIMIT0 0x29474U +#define IDT_SW_NTP20_BARLTBASE0 0x29478U +#define IDT_SW_NTP20_BARUTBASE0 0x2947CU +#define IDT_SW_NTP20_BARSETUP1 0x29480U +#define IDT_SW_NTP20_BARLIMIT1 0x29484U +#define IDT_SW_NTP20_BARLTBASE1 0x29488U +#define IDT_SW_NTP20_BARUTBASE1 0x2948CU +#define IDT_SW_NTP20_BARSETUP2 0x29490U +#define IDT_SW_NTP20_BARLIMIT2 0x29494U +#define IDT_SW_NTP20_BARLTBASE2 0x29498U +#define IDT_SW_NTP20_BARUTBASE2 0x2949CU +#define IDT_SW_NTP20_BARSETUP3 0x294A0U +#define IDT_SW_NTP20_BARLIMIT3 0x294A4U +#define IDT_SW_NTP20_BARLTBASE3 0x294A8U +#define IDT_SW_NTP20_BARUTBASE3 0x294ACU +#define IDT_SW_NTP20_BARSETUP4 0x294B0U +#define IDT_SW_NTP20_BARLIMIT4 0x294B4U +#define IDT_SW_NTP20_BARLTBASE4 0x294B8U +#define IDT_SW_NTP20_BARUTBASE4 0x294BCU +#define IDT_SW_NTP20_BARSETUP5 0x294C0U 
+#define IDT_SW_NTP20_BARLIMIT5 0x294C4U +#define IDT_SW_NTP20_BARLTBASE5 0x294C8U +#define IDT_SW_NTP20_BARUTBASE5 0x294CCU +/* IDT PCIe-switch control register (DWORD) */ +#define IDT_SW_CTL 0x3E000U +/* Boot Configuration Vector Status (DWORD) */ +#define IDT_SW_BCVSTS 0x3E004U +/* Port Clocking Mode (DWORD) */ +#define IDT_SW_PCLKMODE 0x3E008U +/* Reset Drain Delay (DWORD) */ +#define IDT_SW_RDRAINDELAY 0x3E080U +/* Port Operating Mode Change Drain Delay (DWORD) */ +#define IDT_SW_POMCDELAY 0x3E084U +/* Side Effect Delay (DWORD) */ +#define IDT_SW_SEDELAY 0x3E088U +/* Upstream Secondary Bus Reset Delay (DWORD) */ +#define IDT_SW_SSBRDELAY 0x3E08CU +/* Switch partition N Control/Status/Failover registers */ +#define IDT_SW_SWPART0CTL 0x3E100U +#define IDT_SW_SWPART0STS 0x3E104U +#define IDT_SW_SWPART0FCTL 0x3E108U +#define IDT_SW_SWPART1CTL 0x3E120U +#define IDT_SW_SWPART1STS 0x3E124U +#define IDT_SW_SWPART1FCTL 0x3E128U +#define IDT_SW_SWPART2CTL 0x3E140U +#define IDT_SW_SWPART2STS 0x3E144U +#define IDT_SW_SWPART2FCTL 0x3E148U +#define IDT_SW_SWPART3CTL 0x3E160U +#define IDT_SW_SWPART3STS 0x3E164U +#define IDT_SW_SWPART3FCTL 0x3E168U +#define IDT_SW_SWPART4CTL 0x3E180U +#define IDT_SW_SWPART4STS 0x3E184U +#define IDT_SW_SWPART4FCTL 0x3E188U +#define IDT_SW_SWPART5CTL 0x3E1A0U +#define IDT_SW_SWPART5STS 0x3E1A4U +#define IDT_SW_SWPART5FCTL 0x3E1A8U +#define IDT_SW_SWPART6CTL 0x3E1C0U +#define IDT_SW_SWPART6STS 0x3E1C4U +#define IDT_SW_SWPART6FCTL 0x3E1C8U +#define IDT_SW_SWPART7CTL 0x3E1E0U +#define IDT_SW_SWPART7STS 0x3E1E4U +#define IDT_SW_SWPART7FCTL 0x3E1E8U +/* Switch port N control and status registers */ +#define IDT_SW_SWPORT0CTL 0x3E200U +#define IDT_SW_SWPORT0STS 0x3E204U +#define IDT_SW_SWPORT0FCTL 0x3E208U +#define IDT_SW_SWPORT2CTL 0x3E240U +#define IDT_SW_SWPORT2STS 0x3E244U +#define IDT_SW_SWPORT2FCTL 0x3E248U +#define IDT_SW_SWPORT4CTL 0x3E280U +#define IDT_SW_SWPORT4STS 0x3E284U +#define IDT_SW_SWPORT4FCTL 0x3E288U +#define IDT_SW_SWPORT6CTL 0x3E2C0U +#define IDT_SW_SWPORT6STS 0x3E2C4U +#define IDT_SW_SWPORT6FCTL 0x3E2C8U +#define IDT_SW_SWPORT8CTL 0x3E300U +#define IDT_SW_SWPORT8STS 0x3E304U +#define IDT_SW_SWPORT8FCTL 0x3E308U +#define IDT_SW_SWPORT12CTL 0x3E380U +#define IDT_SW_SWPORT12STS 0x3E384U +#define IDT_SW_SWPORT12FCTL 0x3E388U +#define IDT_SW_SWPORT16CTL 0x3E400U +#define IDT_SW_SWPORT16STS 0x3E404U +#define IDT_SW_SWPORT16FCTL 0x3E408U +#define IDT_SW_SWPORT20CTL 0x3E480U +#define IDT_SW_SWPORT20STS 0x3E484U +#define IDT_SW_SWPORT20FCTL 0x3E488U +/* Switch Event registers */ +/* Switch Event Status/Mask/Partition mask (DWORD) */ +#define IDT_SW_SESTS 0x3EC00U +#define IDT_SW_SEMSK 0x3EC04U +#define IDT_SW_SEPMSK 0x3EC08U +/* Switch Event Link Up/Down Status/Mask (DWORD) */ +#define IDT_SW_SELINKUPSTS 0x3EC0CU +#define IDT_SW_SELINKUPMSK 0x3EC10U +#define IDT_SW_SELINKDNSTS 0x3EC14U +#define IDT_SW_SELINKDNMSK 0x3EC18U +/* Switch Event Fundamental Reset Status/Mask (DWORD) */ +#define IDT_SW_SEFRSTSTS 0x3EC1CU +#define IDT_SW_SEFRSTMSK 0x3EC20U +/* Switch Event Hot Reset Status/Mask (DWORD) */ +#define IDT_SW_SEHRSTSTS 0x3EC24U +#define IDT_SW_SEHRSTMSK 0x3EC28U +/* Switch Event Failover Mask (DWORD) */ +#define IDT_SW_SEFOVRMSK 0x3EC2CU +/* Switch Event Global Signal Status/Mask (DWORD) */ +#define IDT_SW_SEGSIGSTS 0x3EC30U +#define IDT_SW_SEGSIGMSK 0x3EC34U +/* NT Global Doorbell Status (DWORD) */ +#define IDT_SW_GDBELLSTS 0x3EC3CU +/* Switch partition N message M control (msgs routing table) (DWORD) */ +#define IDT_SW_SWP0MSGCTL0 0x3EE00U +#define 
IDT_SW_SWP1MSGCTL0 0x3EE04U +#define IDT_SW_SWP2MSGCTL0 0x3EE08U +#define IDT_SW_SWP3MSGCTL0 0x3EE0CU +#define IDT_SW_SWP4MSGCTL0 0x3EE10U +#define IDT_SW_SWP5MSGCTL0 0x3EE14U +#define IDT_SW_SWP6MSGCTL0 0x3EE18U +#define IDT_SW_SWP7MSGCTL0 0x3EE1CU +#define IDT_SW_SWP0MSGCTL1 0x3EE20U +#define IDT_SW_SWP1MSGCTL1 0x3EE24U +#define IDT_SW_SWP2MSGCTL1 0x3EE28U +#define IDT_SW_SWP3MSGCTL1 0x3EE2CU +#define IDT_SW_SWP4MSGCTL1 0x3EE30U +#define IDT_SW_SWP5MSGCTL1 0x3EE34U +#define IDT_SW_SWP6MSGCTL1 0x3EE38U +#define IDT_SW_SWP7MSGCTL1 0x3EE3CU +#define IDT_SW_SWP0MSGCTL2 0x3EE40U +#define IDT_SW_SWP1MSGCTL2 0x3EE44U +#define IDT_SW_SWP2MSGCTL2 0x3EE48U +#define IDT_SW_SWP3MSGCTL2 0x3EE4CU +#define IDT_SW_SWP4MSGCTL2 0x3EE50U +#define IDT_SW_SWP5MSGCTL2 0x3EE54U +#define IDT_SW_SWP6MSGCTL2 0x3EE58U +#define IDT_SW_SWP7MSGCTL2 0x3EE5CU +#define IDT_SW_SWP0MSGCTL3 0x3EE60U +#define IDT_SW_SWP1MSGCTL3 0x3EE64U +#define IDT_SW_SWP2MSGCTL3 0x3EE68U +#define IDT_SW_SWP3MSGCTL3 0x3EE6CU +#define IDT_SW_SWP4MSGCTL3 0x3EE70U +#define IDT_SW_SWP5MSGCTL3 0x3EE74U +#define IDT_SW_SWP6MSGCTL3 0x3EE78U +#define IDT_SW_SWP7MSGCTL3 0x3EE7CU +/* SMBus Status and Control registers (DWORD) */ +#define IDT_SW_SMBUSSTS 0x3F188U +#define IDT_SW_SMBUSCTL 0x3F18CU +/* Serial EEPROM Interface (DWORD) */ +#define IDT_SW_EEPROMINTF 0x3F190U +/* MBus I/O Expander Address N (DWORD) */ +#define IDT_SW_IOEXPADDR0 0x3F198U +#define IDT_SW_IOEXPADDR1 0x3F19CU +#define IDT_SW_IOEXPADDR2 0x3F1A0U +#define IDT_SW_IOEXPADDR3 0x3F1A4U +#define IDT_SW_IOEXPADDR4 0x3F1A8U +#define IDT_SW_IOEXPADDR5 0x3F1ACU +/* General Purpose Events Control and Status registers (DWORD) */ +#define IDT_SW_GPECTL 0x3F1B0U +#define IDT_SW_GPESTS 0x3F1B4U +/* Temperature sensor Control/Status/Alarm/Adjustment/Slope registers */ +#define IDT_SW_TMPCTL 0x3F1D4U +#define IDT_SW_TMPSTS 0x3F1D8U +#define IDT_SW_TMPALARM 0x3F1DCU +#define IDT_SW_TMPADJ 0x3F1E0U +#define IDT_SW_TSSLOPE 0x3F1E4U +/* SMBus Configuration Block header log (DWORD) */ +#define IDT_SW_SMBUSCBHL 0x3F1E8U + +/* + * Common registers related constants + * @IDT_REG_ALIGN: Registers alignment used in the driver + * @IDT_REG_PCI_MAX: Maximum PCI configuration space register value + * @IDT_REG_SW_MAX: Maximum global register value + */ +#define IDT_REG_ALIGN 4 +#define IDT_REG_PCI_MAX 0x00FFFU +#define IDT_REG_SW_MAX 0x3FFFFU + +/* + * PCICMDSTS register fields related constants + * @IDT_PCICMDSTS_IOAE: I/O access enable + * @IDT_PCICMDSTS_MAE: Memory access enable + * @IDT_PCICMDSTS_BME: Bus master enable + */ +#define IDT_PCICMDSTS_IOAE 0x00000001U +#define IDT_PCICMDSTS_MAE 0x00000002U +#define IDT_PCICMDSTS_BME 0x00000004U + +/* + * PCIEDCAP register fields related constants + * @IDT_PCIEDCAP_MPAYLOAD_MASK: Maximum payload size mask + * @IDT_PCIEDCAP_MPAYLOAD_FLD: Maximum payload size field offset + * @IDT_PCIEDCAP_MPAYLOAD_S128: Max supported payload size of 128 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S256: Max supported payload size of 256 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S512: Max supported payload size of 512 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S1024: Max supported payload size of 1024 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S2048: Max supported payload size of 2048 bytes + */ +#define IDT_PCIEDCAP_MPAYLOAD_MASK 0x00000007U +#define IDT_PCIEDCAP_MPAYLOAD_FLD 0 +#define IDT_PCIEDCAP_MPAYLOAD_S128 0x00000000U +#define IDT_PCIEDCAP_MPAYLOAD_S256 0x00000001U +#define IDT_PCIEDCAP_MPAYLOAD_S512 0x00000002U +#define IDT_PCIEDCAP_MPAYLOAD_S1024 0x00000003U +#define IDT_PCIEDCAP_MPAYLOAD_S2048 0x00000004U + 
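The IDT_PCIEDCAP_MPAYLOAD_* encodings above grow as powers of two from 128 bytes, so the field value maps to a byte count with a single shift. A minimal illustrative sketch of decoding the capability register with these constants (the helper name is hypothetical and not part of the driver; u32 comes from <linux/types.h>):

static inline u32 idt_dcap_mpayload_bytes(u32 pciedcap)
{
	/* MPAYLOAD: 0 -> 128B, 1 -> 256B, 2 -> 512B, 3 -> 1024B, 4 -> 2048B */
	u32 fld = (pciedcap & IDT_PCIEDCAP_MPAYLOAD_MASK) >>
		  IDT_PCIEDCAP_MPAYLOAD_FLD;

	return 128U << fld;
}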
+/* + * PCIEDCTLSTS registers fields related constants + * @IDT_PCIEDCTLSTS_MPS_MASK: Maximum payload size mask + * @IDT_PCIEDCTLSTS_MPS_FLD: MPS field offset + * @IDT_PCIEDCTLSTS_MPS_S128: Max payload size of 128 bytes + * @IDT_PCIEDCTLSTS_MPS_S256: Max payload size of 256 bytes + * @IDT_PCIEDCTLSTS_MPS_S512: Max payload size of 512 bytes + * @IDT_PCIEDCTLSTS_MPS_S1024: Max payload size of 1024 bytes + * @IDT_PCIEDCTLSTS_MPS_S2048: Max payload size of 2048 bytes + * @IDT_PCIEDCTLSTS_MPS_S4096: Max payload size of 4096 bytes + */ +#define IDT_PCIEDCTLSTS_MPS_MASK 0x000000E0U +#define IDT_PCIEDCTLSTS_MPS_FLD 5 +#define IDT_PCIEDCTLSTS_MPS_S128 0x00000000U +#define IDT_PCIEDCTLSTS_MPS_S256 0x00000020U +#define IDT_PCIEDCTLSTS_MPS_S512 0x00000040U +#define IDT_PCIEDCTLSTS_MPS_S1024 0x00000060U +#define IDT_PCIEDCTLSTS_MPS_S2048 0x00000080U +#define IDT_PCIEDCTLSTS_MPS_S4096 0x000000A0U + +/* + * PCIELCAP register fields related constants + * @IDT_PCIELCAP_PORTNUM_MASK: Port number field mask + * @IDT_PCIELCAP_PORTNUM_FLD: Port number field offset + */ +#define IDT_PCIELCAP_PORTNUM_MASK 0xFF000000U +#define IDT_PCIELCAP_PORTNUM_FLD 24 + +/* + * PCIELCTLSTS registers fields related constants + * @IDT_PCIELCTLSTS_CLS_MASK: Current link speed mask + * @IDT_PCIELCTLSTS_CLS_FLD: Current link speed field offset + * @IDT_PCIELCTLSTS_NLW_MASK: Negotiated link width mask + * @IDT_PCIELCTLSTS_NLW_FLD: Negotiated link width field offset + * @IDT_PCIELCTLSTS_SCLK_COM: Common slot clock configuration + */ +#define IDT_PCIELCTLSTS_CLS_MASK 0x000F0000U +#define IDT_PCIELCTLSTS_CLS_FLD 16 +#define IDT_PCIELCTLSTS_NLW_MASK 0x03F00000U +#define IDT_PCIELCTLSTS_NLW_FLD 20 +#define IDT_PCIELCTLSTS_SCLK_COM 0x10000000U + +/* + * NTCTL register fields related constants + * @IDT_NTCTL_IDPROTDIS: ID Protection check disable (disable MTBL) + * @IDT_NTCTL_CPEN: Completion enable + * @IDT_NTCTL_RNS: Request no snoop processing (if MTBL disabled) + * @IDT_NTCTL_ATP: Address type processing (if MTBL disabled) + */ +#define IDT_NTCTL_IDPROTDIS 0x00000001U +#define IDT_NTCTL_CPEN 0x00000002U +#define IDT_NTCTL_RNS 0x00000004U +#define IDT_NTCTL_ATP 0x00000008U + +/* + * NTINTSTS register fields related constants + * @IDT_NTINTSTS_MSG: Message interrupt bit + * @IDT_NTINTSTS_DBELL: Doorbell interrupt bit + * @IDT_NTINTSTS_SEVENT: Switch Event interrupt bit + * @IDT_NTINTSTS_TMPSENSOR: Temperature sensor interrupt bit + */ +#define IDT_NTINTSTS_MSG 0x00000001U +#define IDT_NTINTSTS_DBELL 0x00000002U +#define IDT_NTINTSTS_SEVENT 0x00000008U +#define IDT_NTINTSTS_TMPSENSOR 0x00000080U + +/* + * NTINTMSK register fields related constants + * @IDT_NTINTMSK_MSG: Message interrupt mask bit + * @IDT_NTINTMSK_DBELL: Doorbell interrupt mask bit + * @IDT_NTINTMSK_SEVENT: Switch Event interrupt mask bit + * @IDT_NTINTMSK_TMPSENSOR: Temperature sensor interrupt mask bit + * @IDT_NTINTMSK_ALL: NTB-related interrupts mask + */ +#define IDT_NTINTMSK_MSG 0x00000001U +#define IDT_NTINTMSK_DBELL 0x00000002U +#define IDT_NTINTMSK_SEVENT 0x00000008U +#define IDT_NTINTMSK_TMPSENSOR 0x00000080U +#define IDT_NTINTMSK_ALL \ + (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | IDT_NTINTMSK_SEVENT) + +/* + * NTGSIGNAL register fields related constants + * @IDT_NTGSIGNAL_SET: Set global signal of the local partition + */ +#define IDT_NTGSIGNAL_SET 0x00000001U + +/* + * BARSETUP register fields related constants + * @IDT_BARSETUP_TYPE_MASK: Mask of the TYPE field + * @IDT_BARSETUP_TYPE_FLD: TYPE field offset + * @IDT_BARSETUP_TYPE_32: 32-bit addressing BAR + * @IDT_BARSETUP_TYPE_64: 64-bit addressing BAR + * @IDT_BARSETUP_PREF: Value of the BAR 
prefetchable field + * @IDT_BARSETUP_SIZE_MASK: Mask of the SIZE field + * @IDT_BARSETUP_SIZE_FLD: SIZE field offset + * @IDT_BARSETUP_SIZE_CFG: SIZE field value in case of config space MODE + * @IDT_BARSETUP_MODE_CFG: Configuration space BAR mode + * @IDT_BARSETUP_ATRAN_MASK: ATRAN field mask + * @IDT_BARSETUP_ATRAN_FLD: ATRAN field offset + * @IDT_BARSETUP_ATRAN_DIR: Direct address translation memory window + * @IDT_BARSETUP_ATRAN_LUT12: 12-entry lookup table + * @IDT_BARSETUP_ATRAN_LUT24: 24-entry lookup table + * @IDT_BARSETUP_TPART_MASK: TPART field mask + * @IDT_BARSETUP_TPART_FLD: TPART field offset + * @IDT_BARSETUP_EN: BAR enable bit + */ +#define IDT_BARSETUP_TYPE_MASK 0x00000006U +#define IDT_BARSETUP_TYPE_FLD 0 +#define IDT_BARSETUP_TYPE_32 0x00000000U +#define IDT_BARSETUP_TYPE_64 0x00000004U +#define IDT_BARSETUP_PREF 0x00000008U +#define IDT_BARSETUP_SIZE_MASK 0x000003F0U +#define IDT_BARSETUP_SIZE_FLD 4 +#define IDT_BARSETUP_SIZE_CFG 0x000000C0U +#define IDT_BARSETUP_MODE_CFG 0x00000400U +#define IDT_BARSETUP_ATRAN_MASK 0x00001800U +#define IDT_BARSETUP_ATRAN_FLD 11 +#define IDT_BARSETUP_ATRAN_DIR 0x00000000U +#define IDT_BARSETUP_ATRAN_LUT12 0x00000800U +#define IDT_BARSETUP_ATRAN_LUT24 0x00001000U +#define IDT_BARSETUP_TPART_MASK 0x0000E000U +#define IDT_BARSETUP_TPART_FLD 13 +#define IDT_BARSETUP_EN 0x80000000U + +/* + * NTMTBLDATA register fields related constants + * @IDT_NTMTBLDATA_VALID: Set the MTBL entry being valid + * @IDT_NTMTBLDATA_REQID_MASK: Bus:Device:Function field mask + * @IDT_NTMTBLDATA_REQID_FLD: Bus:Device:Function field offset + * @IDT_NTMTBLDATA_PART_MASK: Partition field mask + * @IDT_NTMTBLDATA_PART_FLD: Partition field offset + * @IDT_NTMTBLDATA_ATP_TRANS: Enable AT field translation on request TLPs + * @IDT_NTMTBLDATA_CNS_INV: Enable No Snoop attribute inversion of + * Completion TLPs + * @IDT_NTMTBLDATA_RNS_INV: Enable No Snoop attribute inversion of + * Request TLPs + */ +#define IDT_NTMTBLDATA_VALID 0x00000001U +#define IDT_NTMTBLDATA_REQID_MASK 0x0001FFFEU +#define IDT_NTMTBLDATA_REQID_FLD 1 +#define IDT_NTMTBLDATA_PART_MASK 0x000E0000U +#define IDT_NTMTBLDATA_PART_FLD 17 +#define IDT_NTMTBLDATA_ATP_TRANS 0x20000000U +#define IDT_NTMTBLDATA_CNS_INV 0x40000000U +#define IDT_NTMTBLDATA_RNS_INV 0x80000000U + +/* + * REQIDCAP register fields related constants + * @IDT_REQIDCAP_REQID_MASK: Request ID field mask + * @IDT_REQIDCAP_REQID_FLD: Request ID field offset + */ +#define IDT_REQIDCAP_REQID_MASK 0x0000FFFFU +#define IDT_REQIDCAP_REQID_FLD 0 + +/* + * LUTOFFSET register fields related constants + * @IDT_LUTOFFSET_INDEX_MASK: Lookup table index field mask + * @IDT_LUTOFFSET_INDEX_FLD: Lookup table index field offset + * @IDT_LUTOFFSET_BAR_MASK: Lookup table BAR select field mask + * @IDT_LUTOFFSET_BAR_FLD: Lookup table BAR select field offset + */ +#define IDT_LUTOFFSET_INDEX_MASK 0x0000001FU +#define IDT_LUTOFFSET_INDEX_FLD 0 +#define IDT_LUTOFFSET_BAR_MASK 0x00000700U +#define IDT_LUTOFFSET_BAR_FLD 8 + +/* + * LUTUDATA register fields related constants + * @IDT_LUTUDATA_PART_MASK: Partition field mask + * @IDT_LUTUDATA_PART_FLD: Partition field offset + * @IDT_LUTUDATA_VALID: Lookup table entry valid bit + */ +#define IDT_LUTUDATA_PART_MASK 0x0000000FU +#define IDT_LUTUDATA_PART_FLD 0 +#define IDT_LUTUDATA_VALID 0x80000000U + +/* + * SWPARTxSTS register fields related constants + * @IDT_SWPARTxSTS_SCI: Switch partition state change initiated + * @IDT_SWPARTxSTS_SCC: Switch partition state change completed + * @IDT_SWPARTxSTS_STATE_MASK: 
Switch partition state mask + * @IDT_SWPARTxSTS_STATE_FLD: Switch partition state field offset + * @IDT_SWPARTxSTS_STATE_DIS: Switch partition disabled + * @IDT_SWPARTxSTS_STATE_ACT: Switch partition enabled + * @IDT_SWPARTxSTS_STATE_RES: Switch partition in reset + * @IDT_SWPARTxSTS_US: Switch partition has upstream port + * @IDT_SWPARTxSTS_USID_MASK: Switch partition upstream port ID mask + * @IDT_SWPARTxSTS_USID_FLD: Switch partition upstream port ID field offset + * @IDT_SWPARTxSTS_NT: Upstream port has NT function + * @IDT_SWPARTxSTS_DMA: Upstream port has DMA function + */ +#define IDT_SWPARTxSTS_SCI 0x00000001U +#define IDT_SWPARTxSTS_SCC 0x00000002U +#define IDT_SWPARTxSTS_STATE_MASK 0x00000060U +#define IDT_SWPARTxSTS_STATE_FLD 5 +#define IDT_SWPARTxSTS_STATE_DIS 0x00000000U +#define IDT_SWPARTxSTS_STATE_ACT 0x00000020U +#define IDT_SWPARTxSTS_STATE_RES 0x00000060U +#define IDT_SWPARTxSTS_US 0x00000100U +#define IDT_SWPARTxSTS_USID_MASK 0x00003E00U +#define IDT_SWPARTxSTS_USID_FLD 9 +#define IDT_SWPARTxSTS_NT 0x00004000U +#define IDT_SWPARTxSTS_DMA 0x00008000U + +/* + * SWPORTxSTS register fields related constants + * @IDT_SWPORTxSTS_OMCI: Operation mode change initiated + * @IDT_SWPORTxSTS_OMCC: Operation mode change completed + * @IDT_SWPORTxSTS_LINKUP: Link up status + * @IDT_SWPORTxSTS_DS: Port lanes behave as downstream lanes + * @IDT_SWPORTxSTS_MODE_MASK: Port mode field mask + * @IDT_SWPORTxSTS_MODE_FLD: Port mode field offset + * @IDT_SWPORTxSTS_MODE_DIS: Port mode - disabled + * @IDT_SWPORTxSTS_MODE_DS: Port mode - downstream switch port + * @IDT_SWPORTxSTS_MODE_US: Port mode - upstream switch port + * @IDT_SWPORTxSTS_MODE_NT: Port mode - NT function + * @IDT_SWPORTxSTS_MODE_USNT: Port mode - upstream switch port with NTB + * @IDT_SWPORTxSTS_MODE_UNAT: Port mode - unattached + * @IDT_SWPORTxSTS_MODE_USDMA: Port mode - upstream switch port with DMA + * @IDT_SWPORTxSTS_MODE_USNTDMA: Port mode - upstream port with NTB and DMA + * @IDT_SWPORTxSTS_MODE_NTDMA: Port mode - NT function with DMA + * @IDT_SWPORTxSTS_SWPART_MASK: Port partition field mask + * @IDT_SWPORTxSTS_SWPART_FLD: Port partition field offset + * @IDT_SWPORTxSTS_DEVNUM_MASK: Port device number field mask + * @IDT_SWPORTxSTS_DEVNUM_FLD: Port device number field offset + */ +#define IDT_SWPORTxSTS_OMCI 0x00000001U +#define IDT_SWPORTxSTS_OMCC 0x00000002U +#define IDT_SWPORTxSTS_LINKUP 0x00000010U +#define IDT_SWPORTxSTS_DS 0x00000020U +#define IDT_SWPORTxSTS_MODE_MASK 0x000003C0U +#define IDT_SWPORTxSTS_MODE_FLD 6 +#define IDT_SWPORTxSTS_MODE_DIS 0x00000000U +#define IDT_SWPORTxSTS_MODE_DS 0x00000040U +#define IDT_SWPORTxSTS_MODE_US 0x00000080U +#define IDT_SWPORTxSTS_MODE_NT 0x000000C0U +#define IDT_SWPORTxSTS_MODE_USNT 0x00000100U +#define IDT_SWPORTxSTS_MODE_UNAT 0x00000140U +#define IDT_SWPORTxSTS_MODE_USDMA 0x00000180U +#define IDT_SWPORTxSTS_MODE_USNTDMA 0x000001C0U +#define IDT_SWPORTxSTS_MODE_NTDMA 0x00000200U +#define IDT_SWPORTxSTS_SWPART_MASK 0x00001C00U +#define IDT_SWPORTxSTS_SWPART_FLD 10 +#define IDT_SWPORTxSTS_DEVNUM_MASK 0x001F0000U +#define IDT_SWPORTxSTS_DEVNUM_FLD 16 + +/* + * SEMSK register fields related constants + * @IDT_SEMSK_LINKUP: Link Up event mask bit + * @IDT_SEMSK_LINKDN: Link Down event mask bit + * @IDT_SEMSK_GSIGNAL: Global Signal event mask bit + */ +#define IDT_SEMSK_LINKUP 0x00000001U +#define IDT_SEMSK_LINKDN 0x00000002U +#define IDT_SEMSK_GSIGNAL 0x00000020U + +/* + * SWPxMSGCTL register fields related constants + * @IDT_SWPxMSGCTL_REG_MASK: Register select field mask + * 
@IDT_SWPxMSGCTL_REG_FLD: Register select field offset + * @IDT_SWPxMSGCTL_PART_MASK: Partition select field mask + * @IDT_SWPxMSGCTL_PART_FLD: Partition select field offset + */ +#define IDT_SWPxMSGCTL_REG_MASK 0x00000003U +#define IDT_SWPxMSGCTL_REG_FLD 0 +#define IDT_SWPxMSGCTL_PART_MASK 0x00000070U +#define IDT_SWPxMSGCTL_PART_FLD 4 + +/* + * TMPCTL register fields related constants + * @IDT_TMPCTL_LTH_MASK: Low temperature threshold field mask + * @IDT_TMPCTL_LTH_FLD: Low temperature threshold field offset + * @IDT_TMPCTL_MTH_MASK: Middle temperature threshold field mask + * @IDT_TMPCTL_MTH_FLD: Middle temperature threshold field offset + * @IDT_TMPCTL_HTH_MASK: High temperature threshold field mask + * @IDT_TMPCTL_HTH_FLD: High temperature threshold field offset + * @IDT_TMPCTL_PDOWN: Temperature sensor power down + */ +#define IDT_TMPCTL_LTH_MASK 0x000000FFU +#define IDT_TMPCTL_LTH_FLD 0 +#define IDT_TMPCTL_MTH_MASK 0x0000FF00U +#define IDT_TMPCTL_MTH_FLD 8 +#define IDT_TMPCTL_HTH_MASK 0x00FF0000U +#define IDT_TMPCTL_HTH_FLD 16 +#define IDT_TMPCTL_PDOWN 0x80000000U + +/* + * TMPSTS register fields related constants + * @IDT_TMPSTS_TEMP_MASK: Current temperature field mask + * @IDT_TMPSTS_TEMP_FLD: Current temperature field offset + * @IDT_TMPSTS_LTEMP_MASK: Lowest temperature field mask + * @IDT_TMPSTS_LTEMP_FLD: Lowest temperature field offset + * @IDT_TMPSTS_HTEMP_MASK: Highest temperature field mask + * @IDT_TMPSTS_HTEMP_FLD: Highest temperature field offset + */ +#define IDT_TMPSTS_TEMP_MASK 0x000000FFU +#define IDT_TMPSTS_TEMP_FLD 0 +#define IDT_TMPSTS_LTEMP_MASK 0x0000FF00U +#define IDT_TMPSTS_LTEMP_FLD 8 +#define IDT_TMPSTS_HTEMP_MASK 0x00FF0000U +#define IDT_TMPSTS_HTEMP_FLD 16 + +/* + * TMPALARM register fields related constants + * @IDT_TMPALARM_LTEMP_MASK: Lowest temperature field mask + * @IDT_TMPALARM_LTEMP_FLD: Lowest temperature field offset + * @IDT_TMPALARM_HTEMP_MASK: Highest temperature field mask + * @IDT_TMPALARM_HTEMP_FLD: Highest temperature field offset + * @IDT_TMPALARM_IRQ_MASK: Alarm IRQ status mask + */ +#define IDT_TMPALARM_LTEMP_MASK 0x0000FF00U +#define IDT_TMPALARM_LTEMP_FLD 8 +#define IDT_TMPALARM_HTEMP_MASK 0x00FF0000U +#define IDT_TMPALARM_HTEMP_FLD 16 +#define IDT_TMPALARM_IRQ_MASK 0x3F000000U + +/* + * TMPADJ register fields related constants + * @IDT_TMPADJ_OFFSET_MASK: Temperature value offset field mask + * @IDT_TMPADJ_OFFSET_FLD: Temperature value offset field offset + */ +#define IDT_TMPADJ_OFFSET_MASK 0x000000FFU +#define IDT_TMPADJ_OFFSET_FLD 0 + +/* + * Helper macros to get/set the corresponding field value + * @GET_FIELD: Retrieve the value of the corresponding field + * @SET_FIELD: Set the specified field up + * @IS_FLD_SET: Check whether a field is set to the given value + */ +#define GET_FIELD(field, data) \ + (((u32)(data) & IDT_ ##field## _MASK) >> IDT_ ##field## _FLD) +#define SET_FIELD(field, data, value) \ + (((u32)(data) & ~IDT_ ##field## _MASK) | \ + ((u32)(value) << IDT_ ##field## _FLD)) +#define IS_FLD_SET(field, data, value) \ + (((u32)(data) & IDT_ ##field## _MASK) == IDT_ ##field## _ ##value) + +/* + * Useful register masks: + * @IDT_DBELL_MASK: Doorbell bits mask + * @IDT_OUTMSG_MASK: Out messages status bits mask + * @IDT_INMSG_MASK: In messages status bits mask + * @IDT_MSG_MASK: Any message status bits mask + */ +#define IDT_DBELL_MASK ((u32)0xFFFFFFFFU) +#define IDT_OUTMSG_MASK ((u32)0x0000000FU) +#define IDT_INMSG_MASK ((u32)0x000F0000U) +#define IDT_MSG_MASK (IDT_INMSG_MASK | IDT_OUTMSG_MASK) + 
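As a quick illustration of how GET_FIELD()/SET_FIELD()/IS_FLD_SET() compose with the field constants above, here is a minimal sketch (the function names are hypothetical and not part of the driver; bool and u32 come from <linux/types.h>):

/* Check whether a switch port is configured as a plain NT function */
static inline bool idt_port_mode_is_nt(u32 swportsts)
{
	/* expands to: (swportsts & IDT_SWPORTxSTS_MODE_MASK) == IDT_SWPORTxSTS_MODE_NT */
	return IS_FLD_SET(SWPORTxSTS_MODE, swportsts, NT);
}

/* Retarget a BAR to partition 'tpart', leaving the other setup bits intact */
static inline u32 idt_barsetup_set_tpart(u32 barsetup, u32 tpart)
{
	return SET_FIELD(BARSETUP_TPART, barsetup, tpart);
}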
+/* + * Number of IDT NTB resources: + * @IDT_MSG_CNT: Number of Message registers + * @IDT_BAR_CNT: Number of BARs of each port + * @IDT_MTBL_ENTRY_CNT: Number of mapping table entries + */ +#define IDT_MSG_CNT 4 +#define IDT_BAR_CNT 6 +#define IDT_MTBL_ENTRY_CNT 64 + +/* + * General IDT PCIe-switch constants + * @IDT_MAX_NR_PORTS: Maximum number of ports per IDT PCIe-switch + * @IDT_MAX_NR_PARTS: Maximum number of partitions per IDT PCIe-switch + * @IDT_MAX_NR_PEERS: Maximum number of NT-peers per IDT PCIe-switch + * @IDT_MAX_NR_MWS: Maximum number of Memory Windows + * @IDT_PCIE_REGSIZE: Size of the registers in bytes + * @IDT_TRANS_ALIGN: Alignment of translated base address + * @IDT_DIR_SIZE_ALIGN: Alignment of size setting for direct translated MWs. + * Even though the lower 10 bits are reserved, they are + * treated by IDT as ones, so effectively there is no + * size alignment limit for DIR address translation. + */ +#define IDT_MAX_NR_PORTS 24 +#define IDT_MAX_NR_PARTS 8 +#define IDT_MAX_NR_PEERS 8 +#define IDT_MAX_NR_MWS 29 +#define IDT_PCIE_REGSIZE 4 +#define IDT_TRANS_ALIGN 4 +#define IDT_DIR_SIZE_ALIGN 1 + +/* + * IDT PCIe-switch temperature sensor value limits + * @IDT_TEMP_MIN_MDEG: Minimal integer value of temperature + * @IDT_TEMP_MAX_MDEG: Maximal integer value of temperature + * @IDT_TEMP_MIN_OFFSET: Minimal integer value of temperature offset + * @IDT_TEMP_MAX_OFFSET: Maximal integer value of temperature offset + */ +#define IDT_TEMP_MIN_MDEG 0 +#define IDT_TEMP_MAX_MDEG 127500 +#define IDT_TEMP_MIN_OFFSET -64000 +#define IDT_TEMP_MAX_OFFSET 63500 + +/* + * Temperature sensor values enumeration + * @IDT_TEMP_CUR: Current temperature + * @IDT_TEMP_LOW: Lowest historical temperature + * @IDT_TEMP_HIGH: Highest historical temperature + * @IDT_TEMP_OFFSET: Current temperature offset + */ +enum idt_temp_val { + IDT_TEMP_CUR, + IDT_TEMP_LOW, + IDT_TEMP_HIGH, + IDT_TEMP_OFFSET +}; + 
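The temperature limits above imply a granularity of 500 millidegrees: an 8-bit temperature field at its maximum of 0xFF gives 255 * 500 = 127500, which is exactly IDT_TEMP_MAX_MDEG. A TMPSTS field value therefore converts to millidegrees with a single multiply; a minimal sketch (hypothetical helper, not part of the driver):

/* Convert the current-temperature field of a TMPSTS value to millidegrees */
static inline long idt_tmpsts_to_mdeg(u32 tmpsts)
{
	return (long)GET_FIELD(TMPSTS_TEMP, tmpsts) * 500;
}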
+/* + * IDT Memory Windows type. Depending on the device settings, IDT supports + * Direct Address Translation MW registers and Lookup Table registers + * @IDT_MW_DIR: Direct address translation + * @IDT_MW_LUT12: 12-entry lookup table entry + * @IDT_MW_LUT24: 24-entry lookup table entry + * + * NOTE: These values are exactly the same as the ones of the BARSETUP ATRAN field + */ +enum idt_mw_type { + IDT_MW_DIR = 0x0, + IDT_MW_LUT12 = 0x1, + IDT_MW_LUT24 = 0x2 +}; + +/* + * IDT PCIe-switch model private data + * @name: Device name + * @port_cnt: Total number of NT endpoint ports + * @ports: Port ids + */ +struct idt_89hpes_cfg { + char *name; + unsigned char port_cnt; + unsigned char ports[]; +}; + +/* + * Memory window configuration structure + * @type: Type of the memory window (direct address translation or lookup + * table) + * + * @bar: PCIe BAR the memory window belongs to + * @idx: Index of the memory window within the BAR + * + * @addr_align: Alignment of translated address + * @size_align: Alignment of memory window size + * @size_max: Maximum size of memory window + */ +struct idt_mw_cfg { + enum idt_mw_type type; + + unsigned char bar; + unsigned char idx; + + u64 addr_align; + u64 size_align; + u64 size_max; +}; + +/* + * Description structure of peer IDT NT-functions: + * @port: NT-function port + * @part: NT-function partition + * + * @mw_cnt: Number of memory windows supported by NT-function + * @mws: Array of memory windows descriptors + */ +struct idt_ntb_peer { + unsigned char port; + unsigned char part; + + unsigned char mw_cnt; + struct idt_mw_cfg *mws; +}; + +/* + * Description structure of local IDT NT-function: + * @ntb: Linux NTB-device description structure + * @swcfg: Pointer to the structure of local IDT PCIe-switch + * specific configurations + * + * @port: Local NT-function port + * @part: Local NT-function partition + * + * @peer_cnt: Number of peers with activated NTB-function + * @peers: Array of peer description structures + * @port_idx_map: Map of port number -> peer index + * @part_idx_map: Map of partition number -> peer index + * + * @mtbl_lock: Mapping table access lock + * + * @mw_cnt: Number of memory windows supported by NT-function + * @mws: Array of memory windows descriptors + * @lut_lock: Lookup table access lock + * + * @msg_locks: Message registers access locks + * + * @cfgspc: Virtual address of the memory mapped configuration + * space of the NT-function + * @db_mask_lock: Doorbell mask register lock + * @msg_mask_lock: Message mask register lock + * @gasa_lock: GASA registers access lock + * + * @hwmon_mtx: Temperature sensor interface update mutex + * + * @dbgfs_info: DebugFS info node + */ +struct idt_ntb_dev { + struct ntb_dev ntb; + struct idt_89hpes_cfg *swcfg; + + unsigned char port; + unsigned char part; + + unsigned char peer_cnt; + struct idt_ntb_peer peers[IDT_MAX_NR_PEERS]; + char port_idx_map[IDT_MAX_NR_PORTS]; + char part_idx_map[IDT_MAX_NR_PARTS]; + + spinlock_t mtbl_lock; + + unsigned char mw_cnt; + struct idt_mw_cfg *mws; + spinlock_t lut_lock; + + spinlock_t msg_locks[IDT_MSG_CNT]; + + void __iomem *cfgspc; + spinlock_t db_mask_lock; + spinlock_t msg_mask_lock; + spinlock_t gasa_lock; + + struct mutex hwmon_mtx; + + struct dentry *dbgfs_info; +}; +#define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb) + +/* + * Descriptor of the IDT PCIe-switch BAR resources + * @setup: BAR setup register + * @limit: BAR limit register + * @ltbase: Lower translated base address + * @utbase: Upper translated base address + */ +struct idt_ntb_bar {
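+	/* Fields below hold register offsets (such as the IDT_SW_NTP*_BAR* constants above), not register values */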
+ unsigned int setup; + unsigned int limit; + unsigned int ltbase; + unsigned int utbase; +}; + +/* + * Descriptor of the IDT PCIe-switch message resources + * @in: Inbound message register + * @out: Outbound message register + * @src: Source of inbound message register + */ +struct idt_ntb_msg { + unsigned int in; + unsigned int out; + unsigned int src; +}; + +/* + * Descriptor of the IDT PCIe-switch NT-function specific parameters in the + * PCI Configuration Space + * @bars: BARs related registers + * @msgs: Messaging related registers + */ +struct idt_ntb_regs { + struct idt_ntb_bar bars[IDT_BAR_CNT]; + struct idt_ntb_msg msgs[IDT_MSG_CNT]; +}; + +/* + * Descriptor of the IDT PCIe-switch port specific parameters in the + * Global Configuration Space + * @pcicmdsts: PCI command/status register + * @pcielctlsts: PCIe link control/status + * @ntctl: NT-function control register + * + * @ctl: Port control register + * @sts: Port status register + * + * @bars: BARs related registers + */ +struct idt_ntb_port { + unsigned int pcicmdsts; + unsigned int pcielctlsts; + unsigned int ntctl; + + unsigned int ctl; + unsigned int sts; + + struct idt_ntb_bar bars[IDT_BAR_CNT]; +}; + +/* + * Descriptor of the IDT PCIe-switch partition specific parameters. + * @ctl: Partition control register in the Global Address Space + * @sts: Partition status register in the Global Address Space + * @msgctl: Messages control registers + */ +struct idt_ntb_part { + unsigned int ctl; + unsigned int sts; + unsigned int msgctl[IDT_MSG_CNT]; +}; + +#endif /* NTB_HW_IDT_H */ diff --git a/drivers/ntb/hw/intel/Kconfig b/drivers/ntb/hw/intel/Kconfig new file mode 100644 index 0000000000..ed4d6dd570 --- /dev/null +++ b/drivers/ntb/hw/intel/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NTB_INTEL + tristate "Intel Non-Transparent Bridge support" + depends on X86_64 + help + This driver supports Intel NTB on capable Xeon and Atom hardware. + + If unsure, say N. diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile new file mode 100644 index 0000000000..f80da0ba15 --- /dev/null +++ b/drivers/ntb/hw/intel/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o +ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o ntb_hw_gen4.o diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c new file mode 100644 index 0000000000..9ab836d0d4 --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -0,0 +1,2085 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 Intel Corporation. All rights reserved. + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright(c) 2012 Intel Corporation. All rights reserved. + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Intel PCIe NTB Linux driver + */ + +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/ntb.h> + +#include "ntb_hw_intel.h" +#include "ntb_hw_gen1.h" +#include "ntb_hw_gen3.h" +#include "ntb_hw_gen4.h" + +#define NTB_NAME "ntb_hw_intel" +#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver" +#define NTB_VER "2.0" + +MODULE_DESCRIPTION(NTB_DESC); +MODULE_VERSION(NTB_VER); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corporation"); + +#define bar0_off(base, bar) ((base) + ((bar) << 2)) +#define bar2_off(base, bar) bar0_off(base, (bar) - 2) + +static const struct intel_ntb_reg xeon_reg; +static const struct intel_ntb_alt_reg xeon_pri_reg; +static const struct intel_ntb_alt_reg xeon_sec_reg; +static const struct intel_ntb_alt_reg xeon_b2b_reg; +static const struct intel_ntb_xlat_reg xeon_pri_xlat; +static const struct intel_ntb_xlat_reg xeon_sec_xlat; +static const struct ntb_dev_ops intel_ntb_ops; + +static const struct file_operations intel_ntb_debugfs_info; +static struct dentry *debugfs_dir; + +static int b2b_mw_idx = -1; +module_param(b2b_mw_idx, int, 0644); +MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A " + "value of zero or positive starts from first mw idx, and a " + "negative value starts from last mw idx. Both sides MUST " + "set the same value here!"); + +static unsigned int b2b_mw_share; +module_param(b2b_mw_share, uint, 0644); +MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the " + "ntb so that the peer ntb only occupies the first half of " + "the mw, so the second half can still be used as a mw. 
Both " + "sides MUST set the same value here!"); + +module_param_named(xeon_b2b_usd_bar2_addr64, + xeon_b2b_usd_addr.bar2_addr64, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, + "XEON B2B USD BAR 2 64-bit address"); + +module_param_named(xeon_b2b_usd_bar4_addr64, + xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64, + "XEON B2B USD BAR 4 64-bit address"); + +module_param_named(xeon_b2b_usd_bar4_addr32, + xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32, + "XEON B2B USD split-BAR 4 32-bit address"); + +module_param_named(xeon_b2b_usd_bar5_addr32, + xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32, + "XEON B2B USD split-BAR 5 32-bit address"); + +module_param_named(xeon_b2b_dsd_bar2_addr64, + xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, + "XEON B2B DSD BAR 2 64-bit address"); + +module_param_named(xeon_b2b_dsd_bar4_addr64, + xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64, + "XEON B2B DSD BAR 4 64-bit address"); + +module_param_named(xeon_b2b_dsd_bar4_addr32, + xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32, + "XEON B2B DSD split-BAR 4 32-bit address"); + +module_param_named(xeon_b2b_dsd_bar5_addr32, + xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); +MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, + "XEON B2B DSD split-BAR 5 32-bit address"); + + +static int xeon_init_isr(struct intel_ntb_dev *ndev); + +static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev) +{ + ndev->unsafe_flags = 0; + ndev->unsafe_flags_ignore = 0; + + /* Only B2B has a workaround to avoid SDOORBELL */ + if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) + if (!ntb_topo_is_b2b(ndev->ntb.topo)) + ndev->unsafe_flags |= NTB_UNSAFE_DB; + + /* No low level workaround to avoid SB01BASE */ + if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) { + ndev->unsafe_flags |= NTB_UNSAFE_DB; + ndev->unsafe_flags |= NTB_UNSAFE_SPAD; + } +} + +static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev, + unsigned long flag) +{ + return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore); +} + +static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev, + unsigned long flag) +{ + flag &= ndev->unsafe_flags; + ndev->unsafe_flags_ignore |= flag; + + return !!flag; +} + +int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx) +{ + if (idx < 0 || idx >= ndev->mw_count) + return -EINVAL; + return ndev->reg->mw_bar[idx]; +} + +void ndev_db_addr(struct intel_ntb_dev *ndev, + phys_addr_t *db_addr, resource_size_t *db_size, + phys_addr_t reg_addr, unsigned long reg) +{ + if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) + pr_warn_once("%s: NTB unsafe doorbell access", __func__); + + if (db_addr) { + *db_addr = reg_addr + reg; + dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr); + } + + if (db_size) { + *db_size = ndev->reg->db_size; + dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); + } +} + +u64 ndev_db_read(struct intel_ntb_dev *ndev, + void __iomem *mmio) +{ + if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) + pr_warn_once("%s: NTB unsafe doorbell access", __func__); + + return ndev->reg->db_ioread(mmio); +} + +int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, + void __iomem *mmio) +{ + if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) + pr_warn_once("%s: NTB unsafe doorbell access", __func__); + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + 
ndev->reg->db_iowrite(db_bits, mmio); + + return 0; +} + +static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits, + void __iomem *mmio) +{ + unsigned long irqflags; + + if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) + pr_warn_once("%s: NTB unsafe doorbell access", __func__); + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&ndev->db_mask_lock, irqflags); + { + ndev->db_mask |= db_bits; + ndev->reg->db_iowrite(ndev->db_mask, mmio); + } + spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); + + return 0; +} + +static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, + void __iomem *mmio) +{ + unsigned long irqflags; + + if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB)) + pr_warn_once("%s: NTB unsafe doorbell access", __func__); + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&ndev->db_mask_lock, irqflags); + { + ndev->db_mask &= ~db_bits; + ndev->reg->db_iowrite(ndev->db_mask, mmio); + } + spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags); + + return 0; +} + +static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) +{ + u64 shift, mask; + + shift = ndev->db_vec_shift; + mask = BIT_ULL(shift) - 1; + + return mask << (shift * db_vector); +} + +static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx, + phys_addr_t *spad_addr, phys_addr_t reg_addr, + unsigned long reg) +{ + if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) + pr_warn_once("%s: NTB unsafe scratchpad access", __func__); + + if (idx < 0 || idx >= ndev->spad_count) + return -EINVAL; + + if (spad_addr) { + *spad_addr = reg_addr + reg + (idx << 2); + dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n", + *spad_addr); + } + + return 0; +} + +static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx, + void __iomem *mmio) +{ + if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) + pr_warn_once("%s: NTB unsafe scratchpad access", __func__); + + if (idx < 0 || idx >= ndev->spad_count) + return 0; + + return ioread32(mmio + (idx << 2)); +} + +static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val, + void __iomem *mmio) +{ + if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD)) + pr_warn_once("%s: NTB unsafe scratchpad access", __func__); + + if (idx < 0 || idx >= ndev->spad_count) + return -EINVAL; + + iowrite32(val, mmio + (idx << 2)); + + return 0; +} + +static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) +{ + u64 vec_mask; + + vec_mask = ndev_vec_mask(ndev, vec); + + if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31)) + vec_mask |= ndev->db_link_mask; + + dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask); + + ndev->last_ts = jiffies; + + if (vec_mask & ndev->db_link_mask) { + if (ndev->reg->poll_link(ndev)) + ntb_link_event(&ndev->ntb); + } + + if (vec_mask & ndev->db_valid_mask) + ntb_db_event(&ndev->ntb, vec); + + return IRQ_HANDLED; +} + +static irqreturn_t ndev_vec_isr(int irq, void *dev) +{ + struct intel_ntb_vec *nvec = dev; + + dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n", + irq, nvec->num); + + return ndev_interrupt(nvec->ndev, nvec->num); +} + +static irqreturn_t ndev_irq_isr(int irq, void *dev) +{ + struct intel_ntb_dev *ndev = dev; + + return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); +} + +int ndev_init_isr(struct intel_ntb_dev *ndev, + int msix_min, int msix_max, + int msix_shift, int total_shift) +{ + struct pci_dev *pdev; + int rc, i, msix_count, node; + + pdev = ndev->ntb.pdev; + + node = 
dev_to_node(&pdev->dev); + + /* Mask all doorbell interrupts */ + ndev->db_mask = ndev->db_valid_mask; + ndev->reg->db_iowrite(ndev->db_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + /* Try to set up msix irq */ + + ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec), + GFP_KERNEL, node); + if (!ndev->vec) + goto err_msix_vec_alloc; + + ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix), + GFP_KERNEL, node); + if (!ndev->msix) + goto err_msix_alloc; + + for (i = 0; i < msix_max; ++i) + ndev->msix[i].entry = i; + + msix_count = pci_enable_msix_range(pdev, ndev->msix, + msix_min, msix_max); + if (msix_count < 0) + goto err_msix_enable; + + for (i = 0; i < msix_count; ++i) { + ndev->vec[i].ndev = ndev; + ndev->vec[i].num = i; + rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, + "ndev_vec_isr", &ndev->vec[i]); + if (rc) + goto err_msix_request; + } + + dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count); + ndev->db_vec_count = msix_count; + ndev->db_vec_shift = msix_shift; + return 0; + +err_msix_request: + while (i-- > 0) + free_irq(ndev->msix[i].vector, &ndev->vec[i]); + pci_disable_msix(pdev); +err_msix_enable: + kfree(ndev->msix); +err_msix_alloc: + kfree(ndev->vec); +err_msix_vec_alloc: + ndev->msix = NULL; + ndev->vec = NULL; + + /* Try to set up msi irq */ + + rc = pci_enable_msi(pdev); + if (rc) + goto err_msi_enable; + + rc = request_irq(pdev->irq, ndev_irq_isr, 0, + "ndev_irq_isr", ndev); + if (rc) + goto err_msi_request; + + dev_dbg(&pdev->dev, "Using msi interrupts\n"); + ndev->db_vec_count = 1; + ndev->db_vec_shift = total_shift; + return 0; + +err_msi_request: + pci_disable_msi(pdev); +err_msi_enable: + + /* Try to set up intx irq */ + + pci_intx(pdev, 1); + + rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED, + "ndev_irq_isr", ndev); + if (rc) + goto err_intx_request; + + dev_dbg(&pdev->dev, "Using intx interrupts\n"); + ndev->db_vec_count = 1; + ndev->db_vec_shift = total_shift; + return 0; + +err_intx_request: + return rc; +} + +static void ndev_deinit_isr(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev; + int i; + + pdev = ndev->ntb.pdev; + + /* Mask all doorbell interrupts */ + ndev->db_mask = ndev->db_valid_mask; + ndev->reg->db_iowrite(ndev->db_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + if (ndev->msix) { + i = ndev->db_vec_count; + while (i--) + free_irq(ndev->msix[i].vector, &ndev->vec[i]); + pci_disable_msix(pdev); + kfree(ndev->msix); + kfree(ndev->vec); + } else { + free_irq(pdev->irq, ndev); + if (pci_dev_msi_enabled(pdev)) + pci_disable_msi(pdev); + } +} + +static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev; + struct pci_dev *pdev; + void __iomem *mmio; + char *buf; + size_t buf_size; + ssize_t ret, off; + union { u64 v64; u32 v32; u16 v16; u8 v8; } u; + + ndev = filp->private_data; + pdev = ndev->ntb.pdev; + mmio = ndev->self_mmio; + + buf_size = min(count, 0x800ul); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + off = 0; + + off += scnprintf(buf + off, buf_size - off, + "NTB Device Information:\n"); + + off += scnprintf(buf + off, buf_size - off, + "Connection Topology -\t%s\n", + ntb_topo_string(ndev->ntb.topo)); + + if (ndev->b2b_idx != UINT_MAX) { + off += scnprintf(buf + off, buf_size - off, + "B2B MW Idx -\t\t%u\n", ndev->b2b_idx); + off += scnprintf(buf + off, buf_size - off, + "B2B Offset -\t\t%#lx\n", ndev->b2b_off); + } + + off += scnprintf(buf + off, buf_size - off, + "BAR4 Split 
-\t\t%s\n", + ndev->bar4_split ? "yes" : "no"); + + off += scnprintf(buf + off, buf_size - off, + "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); + off += scnprintf(buf + off, buf_size - off, + "LNK STA -\t\t%#06x\n", ndev->lnk_sta); + + if (!ndev->reg->link_is_up(ndev)) { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tDown\n"); + } else { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tUp\n"); + off += scnprintf(buf + off, buf_size - off, + "Link Speed -\t\tPCI-E Gen %u\n", + NTB_LNK_STA_SPEED(ndev->lnk_sta)); + off += scnprintf(buf + off, buf_size - off, + "Link Width -\t\tx%u\n", + NTB_LNK_STA_WIDTH(ndev->lnk_sta)); + } + + off += scnprintf(buf + off, buf_size - off, + "Memory Window Count -\t%u\n", ndev->mw_count); + off += scnprintf(buf + off, buf_size - off, + "Scratchpad Count -\t%u\n", ndev->spad_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Count -\t%u\n", ndev->db_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); + + off += scnprintf(buf + off, buf_size - off, + "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask -\t\t%#llx\n", u.v64); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Bell -\t\t%#llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Window Size:\n"); + + pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "PBAR23SZ %hhu\n", u.v8); + if (!ndev->bar4_split) { + pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "PBAR45SZ %hhu\n", u.v8); + } else { + pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "PBAR4SZ %hhu\n", u.v8); + pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "PBAR5SZ %hhu\n", u.v8); + } + + pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "SBAR23SZ %hhu\n", u.v8); + if (!ndev->bar4_split) { + pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "SBAR45SZ %hhu\n", u.v8); + } else { + pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "SBAR4SZ %hhu\n", u.v8); + pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8); + off += scnprintf(buf + off, buf_size - off, + "SBAR5SZ %hhu\n", u.v8); + } + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Incoming XLAT:\n"); + + u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2)); + off += scnprintf(buf + off, buf_size - off, + "XLAT23 -\t\t%#018llx\n", u.v64); + + if (ndev->bar4_split) { + u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); + off += scnprintf(buf + off, buf_size - off, + "XLAT4 -\t\t\t%#06x\n", u.v32); + + u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5)); + off += scnprintf(buf + off, buf_size - off, + "XLAT5 -\t\t\t%#06x\n", u.v32); + } else { + u.v64 = 
ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4)); + off += scnprintf(buf + off, buf_size - off, + "XLAT45 -\t\t%#018llx\n", u.v64); + } + + u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2)); + off += scnprintf(buf + off, buf_size - off, + "LMT23 -\t\t\t%#018llx\n", u.v64); + + if (ndev->bar4_split) { + u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); + off += scnprintf(buf + off, buf_size - off, + "LMT4 -\t\t\t%#06x\n", u.v32); + u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5)); + off += scnprintf(buf + off, buf_size - off, + "LMT5 -\t\t\t%#06x\n", u.v32); + } else { + u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4)); + off += scnprintf(buf + off, buf_size - off, + "LMT45 -\t\t\t%#018llx\n", u.v64); + } + + if (pdev_is_gen1(pdev)) { + if (ntb_topo_is_b2b(ndev->ntb.topo)) { + off += scnprintf(buf + off, buf_size - off, + "\nNTB Outgoing B2B XLAT:\n"); + + u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B XLAT23 -\t\t%#018llx\n", u.v64); + + if (ndev->bar4_split) { + u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B XLAT4 -\t\t%#06x\n", + u.v32); + u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B XLAT5 -\t\t%#06x\n", + u.v32); + } else { + u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B XLAT45 -\t\t%#018llx\n", + u.v64); + } + + u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B LMT23 -\t\t%#018llx\n", u.v64); + + if (ndev->bar4_split) { + u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B LMT4 -\t\t%#06x\n", + u.v32); + u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B LMT5 -\t\t%#06x\n", + u.v32); + } else { + u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "B2B LMT45 -\t\t%#018llx\n", + u.v64); + } + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Secondary BAR:\n"); + + u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "SBAR01 -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "SBAR23 -\t\t%#018llx\n", u.v64); + + if (ndev->bar4_split) { + u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "SBAR4 -\t\t\t%#06x\n", u.v32); + u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "SBAR5 -\t\t\t%#06x\n", u.v32); + } else { + u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "SBAR45 -\t\t%#018llx\n", + u.v64); + } + } + + off += scnprintf(buf + off, buf_size - off, + "\nXEON NTB Statistics:\n"); + + u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "Upstream Memory Miss -\t%u\n", u.v16); + + off += scnprintf(buf + off, buf_size - off, + "\nXEON NTB Hardware Errors:\n"); + + if (!pci_read_config_word(pdev, + XEON_DEVSTS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "DEVSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_word(pdev, + XEON_LINK_STATUS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "LNKSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_dword(pdev, + XEON_UNCERRSTS_OFFSET, 
&u.v32)) + off += scnprintf(buf + off, buf_size - off, + "UNCERRSTS -\t\t%#06x\n", u.v32); + + if (!pci_read_config_dword(pdev, + XEON_CORERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "CORERRSTS -\t\t%#06x\n", u.v32); + } + + ret = simple_read_from_buffer(ubuf, count, offp, buf, off); + kfree(buf); + return ret; +} + +static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev = filp->private_data; + + if (pdev_is_gen1(ndev->ntb.pdev)) + return ndev_ntb_debugfs_read(filp, ubuf, count, offp); + else if (pdev_is_gen3(ndev->ntb.pdev)) + return ndev_ntb3_debugfs_read(filp, ubuf, count, offp); + else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev)) + return ndev_ntb4_debugfs_read(filp, ubuf, count, offp); + + return -ENXIO; +} + +static void ndev_init_debugfs(struct intel_ntb_dev *ndev) +{ + if (!debugfs_dir) { + ndev->debugfs_dir = NULL; + ndev->debugfs_info = NULL; + } else { + ndev->debugfs_dir = + debugfs_create_dir(pci_name(ndev->ntb.pdev), + debugfs_dir); + if (!ndev->debugfs_dir) + ndev->debugfs_info = NULL; + else + ndev->debugfs_info = + debugfs_create_file("info", S_IRUSR, + ndev->debugfs_dir, ndev, + &intel_ntb_debugfs_info); + } +} + +static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev) +{ + debugfs_remove_recursive(ndev->debugfs_dir); +} + +int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + return ntb_ndev(ntb)->mw_count; +} + +int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + resource_size_t bar_size, mw_size; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + if (addr_align) + *addr_align = pci_resource_len(ndev->ntb.pdev, bar); + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = mw_size; + + return 0; +} + +static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + unsigned long base_reg, xlat_reg, limit_reg; + resource_size_t bar_size, mw_size; + void __iomem *mmio; + u64 base, limit, reg_val; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + /* hardware requires that addr is aligned to bar size */ + if (addr & (bar_size - 1)) + return -EINVAL; + + /* make sure the range fits in the usable mw size */ + if (size > mw_size) + return -EINVAL; + + mmio = ndev->self_mmio; + base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar); + xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar); + limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar); + + if (bar < 4 || !ndev->bar4_split) { + base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64; + + /* Set the limit if supported, if size is not mw_size */ + if (limit_reg && size != mw_size) + limit = base + size; + 
else + limit = 0; + + /* set and verify setting the translation address */ + iowrite64(addr, mmio + xlat_reg); + reg_val = ioread64(mmio + xlat_reg); + if (reg_val != addr) { + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + /* set and verify setting the limit */ + iowrite64(limit, mmio + limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + } else { + /* split bar addr range must all be 32 bit */ + if (addr & (~0ull << 32)) + return -EINVAL; + if ((addr + size) & (~0ull << 32)) + return -EINVAL; + + base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32; + + /* Set the limit if supported, if size is not mw_size */ + if (limit_reg && size != mw_size) + limit = base + size; + else + limit = 0; + + /* set and verify setting the translation address */ + iowrite32(addr, mmio + xlat_reg); + reg_val = ioread32(mmio + xlat_reg); + if (reg_val != addr) { + iowrite32(0, mmio + xlat_reg); + return -EIO; + } + + /* set and verify setting the limit */ + iowrite32(limit, mmio + limit_reg); + reg_val = ioread32(mmio + limit_reg); + if (reg_val != limit) { + iowrite32(base, mmio + limit_reg); + iowrite32(0, mmio + xlat_reg); + return -EIO; + } + } + + return 0; +} + +u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, + enum ntb_width *width) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + if (ndev->reg->link_is_up(ndev)) { + if (speed) + *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta); + if (width) + *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); + return 1; + } else { + /* TODO MAYBE: is it possible to observe the link speed and + * width while link is training? */ + if (speed) + *speed = NTB_SPEED_NONE; + if (width) + *width = NTB_WIDTH_NONE; + return 0; + } +} + +static int intel_ntb_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + struct intel_ntb_dev *ndev; + u32 ntb_ctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + if (ndev->ntb.topo == NTB_TOPO_SEC) + return -EINVAL; + + dev_dbg(&ntb->pdev->dev, + "Enabling link with max_speed %d max_width %d\n", + max_speed, max_width); + if (max_speed != NTB_SPEED_AUTO) + dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed); + if (max_width != NTB_WIDTH_AUTO) + dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width); + + ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); + ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); + ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP; + ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP; + if (ndev->bar4_split) + ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP; + iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); + + return 0; +} + +int intel_ntb_link_disable(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev; + u32 ntb_cntl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + if (ndev->ntb.topo == NTB_TOPO_SEC) + return -EINVAL; + + dev_dbg(&ntb->pdev->dev, "Disabling link\n"); + + /* Bring NTB link down */ + ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); + ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP); + ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP); + if (ndev->bar4_split) + ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP); + ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK; + iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl); + + return 0; +} + +int intel_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + 
/* Numbers of inbound and outbound memory windows match */ + return ntb_ndev(ntb)->mw_count; +} + +int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + int bar; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar) + + (idx == ndev->b2b_idx ? ndev->b2b_off : 0); + + if (size) + *size = pci_resource_len(ndev->ntb.pdev, bar) - + (idx == ndev->b2b_idx ? ndev->b2b_off : 0); + + return 0; +} + +static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb) +{ + return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB); +} + +u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->db_valid_mask; +} + +int intel_ntb_db_vector_count(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + return ndev->db_vec_count; +} + +u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + if (db_vector < 0 || db_vector > ndev->db_vec_count) + return 0; + + return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector); +} + +static u64 intel_ntb_db_read(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_read(ndev, + ndev->self_mmio + + ndev->self_reg->db_bell); +} + +static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_write(ndev, db_bits, + ndev->self_mmio + + ndev->self_reg->db_bell); +} + +int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_set_mask(ndev, db_bits, + ndev->self_mmio + + ndev->self_reg->db_mask); +} + +int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_clear_mask(ndev, db_bits, + ndev->self_mmio + + ndev->self_reg->db_mask); +} + +static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, u64 *db_data, int db_bit) +{ + u64 db_bits; + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + db_bits = BIT_ULL(db_bit); + + if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask)) + return -EINVAL; + + ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, + ndev->peer_reg->db_bell); + + if (db_data) + *db_data = db_bits; + + + return 0; +} + +static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_write(ndev, db_bits, + ndev->peer_mmio + + ndev->peer_reg->db_bell); +} + +int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb) +{ + return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD); +} + +int intel_ntb_spad_count(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + return ndev->spad_count; +} + +u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_spad_read(ndev, idx, + ndev->self_mmio + + ndev->self_reg->spad); +} + +int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_spad_write(ndev, idx, val, + ndev->self_mmio + + ndev->self_reg->spad); +} + +int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, 
int pidx, int sidx,
+				int pidx, int sidx,
+				phys_addr_t *spad_addr)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
+			      ndev->peer_reg->spad);
+}
+
+u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_read(ndev, sidx,
+			      ndev->peer_mmio +
+			      ndev->peer_reg->spad);
+}
+
+int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
+			      u32 val)
+{
+	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+	return ndev_spad_write(ndev, sidx, val,
+			       ndev->peer_mmio +
+			       ndev->peer_reg->spad);
+}
+
+static u64 xeon_db_ioread(const void __iomem *mmio)
+{
+	return (u64)ioread16(mmio);
+}
+
+static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
+{
+	iowrite16((u16)bits, mmio);
+}
+
+static int xeon_poll_link(struct intel_ntb_dev *ndev)
+{
+	u16 reg_val;
+	int rc;
+
+	ndev->reg->db_iowrite(ndev->db_link_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_bell);
+
+	rc = pci_read_config_word(ndev->ntb.pdev,
+				  XEON_LINK_STATUS_OFFSET, &reg_val);
+	if (rc)
+		return 0;
+
+	if (reg_val == ndev->lnk_sta)
+		return 0;
+
+	ndev->lnk_sta = reg_val;
+
+	return 1;
+}
+
+int xeon_link_is_up(struct intel_ntb_dev *ndev)
+{
+	if (ndev->ntb.topo == NTB_TOPO_SEC)
+		return 1;
+
+	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
+
+enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
+{
+	switch (ppd & XEON_PPD_TOPO_MASK) {
+	case XEON_PPD_TOPO_B2B_USD:
+		return NTB_TOPO_B2B_USD;
+
+	case XEON_PPD_TOPO_B2B_DSD:
+		return NTB_TOPO_B2B_DSD;
+
+	case XEON_PPD_TOPO_PRI_USD:
+	case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+		return NTB_TOPO_PRI;
+
+	case XEON_PPD_TOPO_SEC_USD:
+	case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+		return NTB_TOPO_SEC;
+	}
+
+	return NTB_TOPO_NONE;
+}
+
+static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
+{
+	if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
+		dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
+		return 1;
+	}
+	return 0;
+}
+
+static int xeon_init_isr(struct intel_ntb_dev *ndev)
+{
+	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
+			     XEON_DB_MSIX_VECTOR_COUNT,
+			     XEON_DB_MSIX_VECTOR_SHIFT,
+			     XEON_DB_TOTAL_SHIFT);
+}
+
+static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
+{
+	ndev_deinit_isr(ndev);
+}
+
+static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
+			     const struct intel_b2b_addr *addr,
+			     const struct intel_b2b_addr *peer_addr)
+{
+	struct pci_dev *pdev;
+	void __iomem *mmio;
+	resource_size_t bar_size;
+	phys_addr_t bar_addr;
+	int b2b_bar;
+	u8 bar_sz;
+
+	pdev = ndev->ntb.pdev;
+	mmio = ndev->self_mmio;
+
+	if (ndev->b2b_idx == UINT_MAX) {
+		dev_dbg(&pdev->dev, "not using b2b mw\n");
+		b2b_bar = 0;
+		ndev->b2b_off = 0;
+	} else {
+		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+		if (b2b_bar < 0)
+			return -EIO;
+
+		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
+
+		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
+
+		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
+			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
+			ndev->b2b_off = bar_size >> 1;
+		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
+			dev_dbg(&pdev->dev, "b2b using whole bar\n");
+			ndev->b2b_off = 0;
+			--ndev->mw_count;
+		} else {
+			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
+			return -EIO;
+		}
+	}
+
+	/* Reset the secondary bar sizes to match the primary bar sizes,
+	 * except disable or halve the size of the b2b secondary bar.
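+	 * ("bar_sz -= 1" below halves a window because the SZ registers
+	 * encode the BAR size as a power-of-two exponent. When the b2b
+	 * window shares its BAR, the peer register space takes the first
+	 * half and the usable memory window begins at b2b_off.)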
+ * + * Note: code for each specific bar size register, because the register + * offsets are not in a consistent order (bar5sz comes after ppd, odd). + */ + pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz); + if (b2b_bar == 2) { + if (ndev->b2b_off) + bar_sz -= 1; + else + bar_sz = 0; + } + pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz); + pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz); + + if (!ndev->bar4_split) { + pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz); + if (b2b_bar == 4) { + if (ndev->b2b_off) + bar_sz -= 1; + else + bar_sz = 0; + } + pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz); + pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz); + } else { + pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz); + if (b2b_bar == 4) { + if (ndev->b2b_off) + bar_sz -= 1; + else + bar_sz = 0; + } + pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz); + pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz); + + pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz); + if (b2b_bar == 5) { + if (ndev->b2b_off) + bar_sz -= 1; + else + bar_sz = 0; + } + pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz); + pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz); + dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz); + } + + /* SBAR01 hit by first part of the b2b bar */ + if (b2b_bar == 0) + bar_addr = addr->bar0_addr; + else if (b2b_bar == 2) + bar_addr = addr->bar2_addr64; + else if (b2b_bar == 4 && !ndev->bar4_split) + bar_addr = addr->bar4_addr64; + else if (b2b_bar == 4) + bar_addr = addr->bar4_addr32; + else if (b2b_bar == 5) + bar_addr = addr->bar5_addr32; + else + return -EIO; + + dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr); + iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET); + + /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar. + * The b2b bar is either disabled above, or configured half-size, and + * it starts at the PBAR xlat + offset. + */ + + bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); + iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET); + bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET); + dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr); + + if (!ndev->bar4_split) { + bar_addr = addr->bar4_addr64 + + (b2b_bar == 4 ? ndev->b2b_off : 0); + iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET); + bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET); + dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr); + } else { + bar_addr = addr->bar4_addr32 + + (b2b_bar == 4 ? ndev->b2b_off : 0); + iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET); + bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET); + dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr); + + bar_addr = addr->bar5_addr32 + + (b2b_bar == 5 ? ndev->b2b_off : 0); + iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET); + bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET); + dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr); + } + + /* setup incoming bar limits == base addrs (zero length windows) */ + + bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? 
ndev->b2b_off : 0); + iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET); + bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET); + dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr); + + if (!ndev->bar4_split) { + bar_addr = addr->bar4_addr64 + + (b2b_bar == 4 ? ndev->b2b_off : 0); + iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET); + bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET); + dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr); + } else { + bar_addr = addr->bar4_addr32 + + (b2b_bar == 4 ? ndev->b2b_off : 0); + iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET); + bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET); + dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr); + + bar_addr = addr->bar5_addr32 + + (b2b_bar == 5 ? ndev->b2b_off : 0); + iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET); + bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET); + dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr); + } + + /* zero incoming translation addrs */ + iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET); + + if (!ndev->bar4_split) { + iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET); + } else { + iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET); + iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET); + } + + /* zero outgoing translation limits (whole bar size windows) */ + iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET); + if (!ndev->bar4_split) { + iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET); + } else { + iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET); + iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET); + } + + /* set outgoing translation offsets */ + bar_addr = peer_addr->bar2_addr64; + iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET); + bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET); + dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr); + + if (!ndev->bar4_split) { + bar_addr = peer_addr->bar4_addr64; + iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET); + bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET); + dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr); + } else { + bar_addr = peer_addr->bar4_addr32; + iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET); + bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET); + dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr); + + bar_addr = peer_addr->bar5_addr32; + iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET); + bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET); + dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr); + } + + /* set the translation offset for b2b registers */ + if (b2b_bar == 0) + bar_addr = peer_addr->bar0_addr; + else if (b2b_bar == 2) + bar_addr = peer_addr->bar2_addr64; + else if (b2b_bar == 4 && !ndev->bar4_split) + bar_addr = peer_addr->bar4_addr64; + else if (b2b_bar == 4) + bar_addr = peer_addr->bar4_addr32; + else if (b2b_bar == 5) + bar_addr = peer_addr->bar5_addr32; + else + return -EIO; + + /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */ + dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr); + iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL); + iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU); + + if (b2b_bar) { + /* map peer ntb mmio config space registers */ + ndev->peer_mmio = pci_iomap(pdev, b2b_bar, + XEON_B2B_MIN_SIZE); + if (!ndev->peer_mmio) + return -EIO; + + ndev->peer_addr = pci_resource_start(pdev, b2b_bar); + } + + return 0; +} + +static int xeon_init_ntb(struct intel_ntb_dev *ndev) +{ + struct device *dev = &ndev->ntb.pdev->dev; + int rc; + u32 ntb_ctl; + + if (ndev->bar4_split) + ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT; + else + ndev->mw_count = XEON_MW_COUNT; + + ndev->spad_count = XEON_SPAD_COUNT; + 
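	/* 15 doorbell bits by default; bit 15 is reserved for link events */
+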
ndev->db_count = XEON_DB_COUNT; + ndev->db_link_mask = XEON_DB_LINK_BIT; + + switch (ndev->ntb.topo) { + case NTB_TOPO_PRI: + if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { + dev_err(dev, "NTB Primary config disabled\n"); + return -EINVAL; + } + + /* enable link to allow secondary side device to appear */ + ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); + ntb_ctl &= ~NTB_CTL_DISABLE; + iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); + + /* use half the spads for the peer */ + ndev->spad_count >>= 1; + ndev->self_reg = &xeon_pri_reg; + ndev->peer_reg = &xeon_sec_reg; + ndev->xlat_reg = &xeon_sec_xlat; + break; + + case NTB_TOPO_SEC: + if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { + dev_err(dev, "NTB Secondary config disabled\n"); + return -EINVAL; + } + /* use half the spads for the peer */ + ndev->spad_count >>= 1; + ndev->self_reg = &xeon_sec_reg; + ndev->peer_reg = &xeon_pri_reg; + ndev->xlat_reg = &xeon_pri_xlat; + break; + + case NTB_TOPO_B2B_USD: + case NTB_TOPO_B2B_DSD: + ndev->self_reg = &xeon_pri_reg; + ndev->peer_reg = &xeon_b2b_reg; + ndev->xlat_reg = &xeon_sec_xlat; + + if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { + ndev->peer_reg = &xeon_pri_reg; + + if (b2b_mw_idx < 0) + ndev->b2b_idx = b2b_mw_idx + ndev->mw_count; + else + ndev->b2b_idx = b2b_mw_idx; + + if (ndev->b2b_idx >= ndev->mw_count) { + dev_dbg(dev, + "b2b_mw_idx %d invalid for mw_count %u\n", + b2b_mw_idx, ndev->mw_count); + return -EINVAL; + } + + dev_dbg(dev, "setting up b2b mw idx %d means %d\n", + b2b_mw_idx, ndev->b2b_idx); + + } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) { + dev_warn(dev, "Reduce doorbell count by 1\n"); + ndev->db_count -= 1; + } + + if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { + rc = xeon_setup_b2b_mw(ndev, + &xeon_b2b_dsd_addr, + &xeon_b2b_usd_addr); + } else { + rc = xeon_setup_b2b_mw(ndev, + &xeon_b2b_usd_addr, + &xeon_b2b_dsd_addr); + } + if (rc) + return rc; + + /* Enable Bus Master and Memory Space on the secondary side */ + iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, + ndev->self_mmio + XEON_SPCICMD_OFFSET); + + break; + + default: + return -EINVAL; + } + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + + ndev->reg->db_iowrite(ndev->db_valid_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + return 0; +} + +static int xeon_init_dev(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev; + u8 ppd; + int rc, mem; + + pdev = ndev->ntb.pdev; + + switch (pdev->device) { + /* There is a Xeon hardware errata related to writes to SDOORBELL or + * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space, + * which may hang the system. To workaround this use the second memory + * window to access the interrupt and scratch pad registers on the + * remote system. 
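+	 * In that mode ndev->peer_reg is pointed back at the primary
+	 * register set and the memory window chosen by b2b_mw_idx is
+	 * reserved for reaching the peer registers.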
+ */ + case PCI_DEVICE_ID_INTEL_NTB_SS_JSF: + case PCI_DEVICE_ID_INTEL_NTB_PS_JSF: + case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: + case PCI_DEVICE_ID_INTEL_NTB_SS_SNB: + case PCI_DEVICE_ID_INTEL_NTB_PS_SNB: + case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: + case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: + case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: + case PCI_DEVICE_ID_INTEL_NTB_SS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_PS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX: + ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP; + break; + } + + switch (pdev->device) { + /* There is a hardware errata related to accessing any register in + * SB01BASE in the presence of bidirectional traffic crossing the NTB. + */ + case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: + case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: + case PCI_DEVICE_ID_INTEL_NTB_SS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_PS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX: + ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP; + break; + } + + switch (pdev->device) { + /* HW Errata on bit 14 of b2bdoorbell register. Writes will not be + * mirrored to the remote system. Shrink the number of bits by one, + * since bit 14 is the last bit. + */ + case PCI_DEVICE_ID_INTEL_NTB_SS_JSF: + case PCI_DEVICE_ID_INTEL_NTB_PS_JSF: + case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: + case PCI_DEVICE_ID_INTEL_NTB_SS_SNB: + case PCI_DEVICE_ID_INTEL_NTB_PS_SNB: + case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: + case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: + case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: + case PCI_DEVICE_ID_INTEL_NTB_SS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_PS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX: + ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14; + break; + } + + ndev->reg = &xeon_reg; + + rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd); + if (rc) + return -EIO; + + ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); + dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd, + ntb_topo_string(ndev->ntb.topo)); + if (ndev->ntb.topo == NTB_TOPO_NONE) + return -EINVAL; + + if (ndev->ntb.topo != NTB_TOPO_SEC) { + ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd); + dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n", + ppd, ndev->bar4_split); + } else { + /* This is a way for transparent BAR to figure out if we are + * doing split BAR or not. There is no way for the hw on the + * transparent side to know and set the PPD. 
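+	 * Instead, infer it by counting memory BARs: a split-BAR device
+	 * exposes HSX_SPLIT_BAR_MW_COUNT + 1 of them (BAR0 plus one per
+	 * memory window), one more than the non-split layout.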
+ */ + mem = pci_select_bars(pdev, IORESOURCE_MEM); + ndev->bar4_split = hweight32(mem) == + HSX_SPLIT_BAR_MW_COUNT + 1; + dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n", + mem, ndev->bar4_split); + } + + rc = xeon_init_ntb(ndev); + if (rc) + return rc; + + return xeon_init_isr(ndev); +} + +static void xeon_deinit_dev(struct intel_ntb_dev *ndev) +{ + xeon_deinit_isr(ndev); +} + +static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) +{ + int rc; + + pci_set_drvdata(pdev, ndev); + + rc = pci_enable_device(pdev); + if (rc) + goto err_pci_enable; + + rc = pci_request_regions(pdev, NTB_NAME); + if (rc) + goto err_pci_regions; + + pci_set_master(pdev); + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + goto err_dma_mask; + dev_warn(&pdev->dev, "Cannot DMA highmem\n"); + } + + ndev->self_mmio = pci_iomap(pdev, 0, 0); + if (!ndev->self_mmio) { + rc = -EIO; + goto err_mmio; + } + ndev->peer_mmio = ndev->self_mmio; + ndev->peer_addr = pci_resource_start(pdev, 0); + + return 0; + +err_mmio: +err_dma_mask: + pci_release_regions(pdev); +err_pci_regions: + pci_disable_device(pdev); +err_pci_enable: + pci_set_drvdata(pdev, NULL); + return rc; +} + +static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + + if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio) + pci_iounmap(pdev, ndev->peer_mmio); + pci_iounmap(pdev, ndev->self_mmio); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static inline void ndev_init_struct(struct intel_ntb_dev *ndev, + struct pci_dev *pdev) +{ + ndev->ntb.pdev = pdev; + ndev->ntb.topo = NTB_TOPO_NONE; + ndev->ntb.ops = &intel_ntb_ops; + + ndev->b2b_off = 0; + ndev->b2b_idx = UINT_MAX; + + ndev->bar4_split = 0; + + ndev->mw_count = 0; + ndev->spad_count = 0; + ndev->db_count = 0; + ndev->db_vec_count = 0; + ndev->db_vec_shift = 0; + + ndev->ntb_ctl = 0; + ndev->lnk_sta = 0; + + ndev->db_valid_mask = 0; + ndev->db_link_mask = 0; + ndev->db_mask = 0; + + spin_lock_init(&ndev->db_mask_lock); +} + +static int intel_ntb_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct intel_ntb_dev *ndev; + int rc, node; + + node = dev_to_node(&pdev->dev); + ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); + if (!ndev) { + rc = -ENOMEM; + goto err_ndev; + } + + ndev_init_struct(ndev, pdev); + + if (pdev_is_gen1(pdev)) { + rc = intel_ntb_init_pci(ndev, pdev); + if (rc) + goto err_init_pci; + + rc = xeon_init_dev(ndev); + if (rc) + goto err_init_dev; + } else if (pdev_is_gen3(pdev)) { + ndev->ntb.ops = &intel_ntb3_ops; + rc = intel_ntb_init_pci(ndev, pdev); + if (rc) + goto err_init_pci; + + rc = gen3_init_dev(ndev); + if (rc) + goto err_init_dev; + } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) { + ndev->ntb.ops = &intel_ntb4_ops; + rc = intel_ntb_init_pci(ndev, pdev); + if (rc) + goto err_init_pci; + + rc = gen4_init_dev(ndev); + if (rc) + goto err_init_dev; + } else { + rc = -EINVAL; + goto err_init_pci; + } + + ndev_reset_unsafe_flags(ndev); + + ndev->reg->poll_link(ndev); + + ndev_init_debugfs(ndev); + + rc = ntb_register_device(&ndev->ntb); + if (rc) + goto err_register; + + dev_info(&pdev->dev, "NTB device registered.\n"); + + return 0; + +err_register: + ndev_deinit_debugfs(ndev); + if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || + pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) + xeon_deinit_dev(ndev); +err_init_dev: + 
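	/* unwind in the reverse order of the setup calls above */
+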
intel_ntb_deinit_pci(ndev); +err_init_pci: + kfree(ndev); +err_ndev: + return rc; +} + +static void intel_ntb_pci_remove(struct pci_dev *pdev) +{ + struct intel_ntb_dev *ndev = pci_get_drvdata(pdev); + + ntb_unregister_device(&ndev->ntb); + ndev_deinit_debugfs(ndev); + if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || + pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) + xeon_deinit_dev(ndev); + intel_ntb_deinit_pci(ndev); + kfree(ndev); +} + +static const struct intel_ntb_reg xeon_reg = { + .poll_link = xeon_poll_link, + .link_is_up = xeon_link_is_up, + .db_ioread = xeon_db_ioread, + .db_iowrite = xeon_db_iowrite, + .db_size = sizeof(u32), + .ntb_ctl = XEON_NTBCNTL_OFFSET, + .mw_bar = {2, 4, 5}, +}; + +static const struct intel_ntb_alt_reg xeon_pri_reg = { + .db_bell = XEON_PDOORBELL_OFFSET, + .db_mask = XEON_PDBMSK_OFFSET, + .spad = XEON_SPAD_OFFSET, +}; + +static const struct intel_ntb_alt_reg xeon_sec_reg = { + .db_bell = XEON_SDOORBELL_OFFSET, + .db_mask = XEON_SDBMSK_OFFSET, + /* second half of the scratchpads */ + .spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1), +}; + +static const struct intel_ntb_alt_reg xeon_b2b_reg = { + .db_bell = XEON_B2B_DOORBELL_OFFSET, + .spad = XEON_B2B_SPAD_OFFSET, +}; + +static const struct intel_ntb_xlat_reg xeon_pri_xlat = { + /* Note: no primary .bar0_base visible to the secondary side. + * + * The secondary side cannot get the base address stored in primary + * bars. The base address is necessary to set the limit register to + * any value other than zero, or unlimited. + * + * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the + * window by setting the limit equal to base, nor can it limit the size + * of the memory window by setting the limit to base + size. + */ + .bar2_limit = XEON_PBAR23LMT_OFFSET, + .bar2_xlat = XEON_PBAR23XLAT_OFFSET, +}; + +static const struct intel_ntb_xlat_reg xeon_sec_xlat = { + .bar0_base = XEON_SBAR0BASE_OFFSET, + .bar2_limit = XEON_SBAR23LMT_OFFSET, + .bar2_xlat = XEON_SBAR23XLAT_OFFSET, +}; + +struct intel_b2b_addr xeon_b2b_usd_addr = { + .bar2_addr64 = XEON_B2B_BAR2_ADDR64, + .bar4_addr64 = XEON_B2B_BAR4_ADDR64, + .bar4_addr32 = XEON_B2B_BAR4_ADDR32, + .bar5_addr32 = XEON_B2B_BAR5_ADDR32, +}; + +struct intel_b2b_addr xeon_b2b_dsd_addr = { + .bar2_addr64 = XEON_B2B_BAR2_ADDR64, + .bar4_addr64 = XEON_B2B_BAR4_ADDR64, + .bar4_addr32 = XEON_B2B_BAR4_ADDR32, + .bar5_addr32 = XEON_B2B_BAR5_ADDR32, +}; + +/* operations for primary side of local ntb */ +static const struct ntb_dev_ops intel_ntb_ops = { + .mw_count = intel_ntb_mw_count, + .mw_get_align = intel_ntb_mw_get_align, + .mw_set_trans = intel_ntb_mw_set_trans, + .peer_mw_count = intel_ntb_peer_mw_count, + .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, + .link_is_up = intel_ntb_link_is_up, + .link_enable = intel_ntb_link_enable, + .link_disable = intel_ntb_link_disable, + .db_is_unsafe = intel_ntb_db_is_unsafe, + .db_valid_mask = intel_ntb_db_valid_mask, + .db_vector_count = intel_ntb_db_vector_count, + .db_vector_mask = intel_ntb_db_vector_mask, + .db_read = intel_ntb_db_read, + .db_clear = intel_ntb_db_clear, + .db_set_mask = intel_ntb_db_set_mask, + .db_clear_mask = intel_ntb_db_clear_mask, + .peer_db_addr = intel_ntb_peer_db_addr, + .peer_db_set = intel_ntb_peer_db_set, + .spad_is_unsafe = intel_ntb_spad_is_unsafe, + .spad_count = intel_ntb_spad_count, + .spad_read = intel_ntb_spad_read, + .spad_write = intel_ntb_spad_write, + .peer_spad_addr = intel_ntb_peer_spad_addr, + .peer_spad_read = intel_ntb_peer_spad_read, + .peer_spad_write = 
intel_ntb_peer_spad_write, +}; + +static const struct file_operations intel_ntb_debugfs_info = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ndev_debugfs_read, +}; + +static const struct pci_device_id intel_ntb_pci_tbl[] = { + /* GEN1 */ + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)}, + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)}, + + /* GEN3 */ + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)}, + + /* GEN4 */ + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)}, + /* GEN5 PCIe */ + {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)}, + {0} +}; +MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl); + +static struct pci_driver intel_ntb_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = intel_ntb_pci_tbl, + .probe = intel_ntb_pci_probe, + .remove = intel_ntb_pci_remove, +}; + +static int __init intel_ntb_pci_driver_init(void) +{ + int ret; + pr_info("%s %s\n", NTB_DESC, NTB_VER); + + if (debugfs_initialized()) + debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + ret = pci_register_driver(&intel_ntb_pci_driver); + if (ret) + debugfs_remove_recursive(debugfs_dir); + + return ret; +} +module_init(intel_ntb_pci_driver_init); + +static void __exit intel_ntb_pci_driver_exit(void) +{ + pci_unregister_driver(&intel_ntb_pci_driver); + + debugfs_remove_recursive(debugfs_dir); +} +module_exit(intel_ntb_pci_driver_exit); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h new file mode 100644 index 0000000000..344249fc18 --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h @@ -0,0 +1,185 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright(c) 2012-2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NTB_INTEL_GEN1_H_ +#define _NTB_INTEL_GEN1_H_ + +#include "ntb_hw_intel.h" + +/* Intel Gen1 Xeon hardware */ +#define XEON_PBAR23LMT_OFFSET 0x0000 +#define XEON_PBAR45LMT_OFFSET 0x0008 +#define XEON_PBAR4LMT_OFFSET 0x0008 +#define XEON_PBAR5LMT_OFFSET 0x000c +#define XEON_PBAR23XLAT_OFFSET 0x0010 +#define XEON_PBAR45XLAT_OFFSET 0x0018 +#define XEON_PBAR4XLAT_OFFSET 0x0018 +#define XEON_PBAR5XLAT_OFFSET 0x001c +#define XEON_SBAR23LMT_OFFSET 0x0020 +#define XEON_SBAR45LMT_OFFSET 0x0028 +#define XEON_SBAR4LMT_OFFSET 0x0028 +#define XEON_SBAR5LMT_OFFSET 0x002c +#define XEON_SBAR23XLAT_OFFSET 0x0030 +#define XEON_SBAR45XLAT_OFFSET 0x0038 +#define XEON_SBAR4XLAT_OFFSET 0x0038 +#define XEON_SBAR5XLAT_OFFSET 0x003c +#define XEON_SBAR0BASE_OFFSET 0x0040 +#define XEON_SBAR23BASE_OFFSET 0x0048 +#define XEON_SBAR45BASE_OFFSET 0x0050 +#define XEON_SBAR4BASE_OFFSET 0x0050 +#define XEON_SBAR5BASE_OFFSET 0x0054 +#define XEON_SBDF_OFFSET 0x005c +#define XEON_NTBCNTL_OFFSET 0x0058 +#define XEON_PDOORBELL_OFFSET 0x0060 +#define XEON_PDBMSK_OFFSET 0x0062 +#define XEON_SDOORBELL_OFFSET 0x0064 +#define XEON_SDBMSK_OFFSET 0x0066 +#define XEON_USMEMMISS_OFFSET 0x0070 +#define XEON_SPAD_OFFSET 0x0080 +#define XEON_PBAR23SZ_OFFSET 0x00d0 +#define XEON_PBAR45SZ_OFFSET 0x00d1 +#define XEON_PBAR4SZ_OFFSET 0x00d1 +#define XEON_SBAR23SZ_OFFSET 0x00d2 +#define XEON_SBAR45SZ_OFFSET 0x00d3 +#define XEON_SBAR4SZ_OFFSET 0x00d3 +#define XEON_PPD_OFFSET 0x00d4 +#define XEON_PBAR5SZ_OFFSET 0x00d5 +#define XEON_SBAR5SZ_OFFSET 0x00d6 +#define XEON_WCCNTRL_OFFSET 0x00e0 +#define XEON_UNCERRSTS_OFFSET 0x014c +#define XEON_CORERRSTS_OFFSET 0x0158 +#define XEON_LINK_STATUS_OFFSET 0x01a2 +#define XEON_SPCICMD_OFFSET 0x0504 +#define XEON_DEVCTRL_OFFSET 0x0598 +#define XEON_DEVSTS_OFFSET 0x059a +#define XEON_SLINK_STATUS_OFFSET 0x05a2 +#define XEON_B2B_SPAD_OFFSET 0x0100 +#define XEON_B2B_DOORBELL_OFFSET 0x0140 +#define XEON_B2B_XLAT_OFFSETL 0x0144 +#define XEON_B2B_XLAT_OFFSETU 0x0148 +#define XEON_PPD_CONN_MASK 0x03 +#define XEON_PPD_CONN_TRANSPARENT 0x00 +#define XEON_PPD_CONN_B2B 0x01 +#define XEON_PPD_CONN_RP 0x02 +#define XEON_PPD_DEV_MASK 0x10 +#define XEON_PPD_DEV_USD 0x00 +#define XEON_PPD_DEV_DSD 0x10 +#define XEON_PPD_SPLIT_BAR_MASK 0x40 + +#define XEON_PPD_TOPO_MASK (XEON_PPD_CONN_MASK | XEON_PPD_DEV_MASK) +#define XEON_PPD_TOPO_PRI_USD (XEON_PPD_CONN_RP | XEON_PPD_DEV_USD) +#define XEON_PPD_TOPO_PRI_DSD (XEON_PPD_CONN_RP | XEON_PPD_DEV_DSD) +#define XEON_PPD_TOPO_SEC_USD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_USD) 
+#define XEON_PPD_TOPO_SEC_DSD (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_DSD) +#define XEON_PPD_TOPO_B2B_USD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_USD) +#define XEON_PPD_TOPO_B2B_DSD (XEON_PPD_CONN_B2B | XEON_PPD_DEV_DSD) + +#define XEON_MW_COUNT 2 +#define HSX_SPLIT_BAR_MW_COUNT 3 +#define XEON_DB_COUNT 15 +#define XEON_DB_LINK 15 +#define XEON_DB_LINK_BIT BIT_ULL(XEON_DB_LINK) +#define XEON_DB_MSIX_VECTOR_COUNT 4 +#define XEON_DB_MSIX_VECTOR_SHIFT 5 +#define XEON_DB_TOTAL_SHIFT 16 +#define XEON_SPAD_COUNT 16 + +/* Use the following addresses for translation between b2b ntb devices in case + * the hardware default values are not reliable. */ +#define XEON_B2B_BAR0_ADDR 0x1000000000000000ull +#define XEON_B2B_BAR2_ADDR64 0x2000000000000000ull +#define XEON_B2B_BAR4_ADDR64 0x4000000000000000ull +#define XEON_B2B_BAR4_ADDR32 0x20000000u +#define XEON_B2B_BAR5_ADDR32 0x40000000u + +/* The peer ntb secondary config space is 32KB fixed size */ +#define XEON_B2B_MIN_SIZE 0x8000 + +/* flags to indicate hardware errata */ +#define NTB_HWERR_SDOORBELL_LOCKUP BIT_ULL(0) +#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1) +#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2) +#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3) +#define NTB_HWERR_BAR_ALIGN BIT_ULL(4) +#define NTB_HWERR_LTR_BAD BIT_ULL(5) + +extern struct intel_b2b_addr xeon_b2b_usd_addr; +extern struct intel_b2b_addr xeon_b2b_dsd_addr; + +int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max, + int msix_shift, int total_shift); +enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); +void ndev_db_addr(struct intel_ntb_dev *ndev, + phys_addr_t *db_addr, resource_size_t *db_size, + phys_addr_t reg_addr, unsigned long reg); +u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio); +int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, + void __iomem *mmio); +int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx); +int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx); +int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, resource_size_t *size_align, + resource_size_t *size_max); +int intel_ntb_peer_mw_count(struct ntb_dev *ntb); +int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size); +u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, + enum ntb_width *width); +int intel_ntb_link_disable(struct ntb_dev *ntb); +u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb); +int intel_ntb_db_vector_count(struct ntb_dev *ntb); +u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector); +int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits); +int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits); +int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb); +int intel_ntb_spad_count(struct ntb_dev *ntb); +u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx); +int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val); +u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx); +int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, + u32 val); +int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, + phys_addr_t *spad_addr); +int xeon_link_is_up(struct intel_ntb_dev *ndev); + +#endif diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c new file mode 100644 index 0000000000..ffcfc3e02c --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -0,0 +1,627 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * Intel PCIe GEN3 NTB Linux driver
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_intel.h"
+#include "ntb_hw_gen1.h"
+#include "ntb_hw_gen3.h"
+
+static int gen3_poll_link(struct intel_ntb_dev *ndev);
+
+static const struct intel_ntb_reg gen3_reg = {
+	.poll_link = gen3_poll_link,
+	.link_is_up = xeon_link_is_up,
+	.db_ioread = gen3_db_ioread,
+	.db_iowrite = gen3_db_iowrite,
+	.db_size = sizeof(u32),
+	.ntb_ctl = GEN3_NTBCNTL_OFFSET,
+	.mw_bar = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg gen3_pri_reg = {
+	.db_bell = GEN3_EM_DOORBELL_OFFSET,
+	.db_clear = GEN3_IM_INT_STATUS_OFFSET,
+	.db_mask = GEN3_IM_INT_DISABLE_OFFSET,
+	.spad = GEN3_IM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg gen3_b2b_reg = {
+	.db_bell = GEN3_IM_DOORBELL_OFFSET,
+	.db_clear = GEN3_EM_INT_STATUS_OFFSET,
+	.db_mask = GEN3_EM_INT_DISABLE_OFFSET,
+	.spad = GEN3_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg gen3_sec_xlat = {
+/*	.bar0_base = GEN3_EMBAR0_OFFSET, */
+	.bar2_limit = GEN3_IMBAR1XLMT_OFFSET,
+	.bar2_xlat = GEN3_IMBAR1XBASE_OFFSET,
+};
+
+static int gen3_poll_link(struct intel_ntb_dev *ndev)
+{
+	u16 reg_val;
+	int rc;
+
+	ndev->reg->db_iowrite(ndev->db_link_mask,
+			      ndev->self_mmio +
+			      ndev->self_reg->db_clear);
+
+	rc = pci_read_config_word(ndev->ntb.pdev,
+				  GEN3_LINK_STATUS_OFFSET, &reg_val);
+	if (rc)
+		return 0;
+
+	if (reg_val == ndev->lnk_sta)
+		return 0;
+
+	ndev->lnk_sta = reg_val;
+
+	return 1;
+}
+
+static int gen3_init_isr(struct intel_ntb_dev *ndev)
+{
+	int i;
+
+	/*
+	 * The MSIX vectors and the interrupt status bits are not lined up
+	 * on Skylake. By default the link status bit is bit 32, but it is
+	 * mapped to MSIX vector 0. We need to fix this up so they line up.
+	 * The vectors at reset are 1-32,0; we reprogram them to 0-32.
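+	 * Each INTVEC entry is a byte-wide slot; writing i to slot i (the
+	 * loop below) steers interrupt status bit i to MSIX vector i.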
+ */ + + for (i = 0; i < GEN3_DB_MSIX_VECTOR_COUNT; i++) + iowrite8(i, ndev->self_mmio + GEN3_INTVEC_OFFSET + i); + + /* move link status down one as workaround */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) { + iowrite8(GEN3_DB_MSIX_VECTOR_COUNT - 2, + ndev->self_mmio + GEN3_INTVEC_OFFSET + + (GEN3_DB_MSIX_VECTOR_COUNT - 1)); + } + + return ndev_init_isr(ndev, GEN3_DB_MSIX_VECTOR_COUNT, + GEN3_DB_MSIX_VECTOR_COUNT, + GEN3_DB_MSIX_VECTOR_SHIFT, + GEN3_DB_TOTAL_SHIFT); +} + +static int gen3_setup_b2b_mw(struct intel_ntb_dev *ndev, + const struct intel_b2b_addr *addr, + const struct intel_b2b_addr *peer_addr) +{ + struct pci_dev *pdev; + void __iomem *mmio; + phys_addr_t bar_addr; + + pdev = ndev->ntb.pdev; + mmio = ndev->self_mmio; + + /* setup incoming bar limits == base addrs (zero length windows) */ + bar_addr = addr->bar2_addr64; + iowrite64(bar_addr, mmio + GEN3_IMBAR1XLMT_OFFSET); + bar_addr = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET); + dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); + + bar_addr = addr->bar4_addr64; + iowrite64(bar_addr, mmio + GEN3_IMBAR2XLMT_OFFSET); + bar_addr = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET); + dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); + + /* zero incoming translation addrs */ + iowrite64(0, mmio + GEN3_IMBAR1XBASE_OFFSET); + iowrite64(0, mmio + GEN3_IMBAR2XBASE_OFFSET); + + ndev->peer_mmio = ndev->self_mmio; + + return 0; +} + +static int gen3_init_ntb(struct intel_ntb_dev *ndev) +{ + int rc; + + + ndev->mw_count = XEON_MW_COUNT; + ndev->spad_count = GEN3_SPAD_COUNT; + ndev->db_count = GEN3_DB_COUNT; + ndev->db_link_mask = GEN3_DB_LINK_BIT; + + /* DB fixup for using 31 right now */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) + ndev->db_link_mask |= BIT_ULL(31); + + switch (ndev->ntb.topo) { + case NTB_TOPO_B2B_USD: + case NTB_TOPO_B2B_DSD: + ndev->self_reg = &gen3_pri_reg; + ndev->peer_reg = &gen3_b2b_reg; + ndev->xlat_reg = &gen3_sec_xlat; + + if (ndev->ntb.topo == NTB_TOPO_B2B_USD) { + rc = gen3_setup_b2b_mw(ndev, + &xeon_b2b_dsd_addr, + &xeon_b2b_usd_addr); + } else { + rc = gen3_setup_b2b_mw(ndev, + &xeon_b2b_usd_addr, + &xeon_b2b_dsd_addr); + } + + if (rc) + return rc; + + /* Enable Bus Master and Memory Space on the secondary side */ + iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, + ndev->self_mmio + GEN3_SPCICMD_OFFSET); + + break; + + default: + return -EINVAL; + } + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + + ndev->reg->db_iowrite(ndev->db_valid_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + return 0; +} + +int gen3_init_dev(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev; + u8 ppd; + int rc; + + pdev = ndev->ntb.pdev; + + ndev->reg = &gen3_reg; + + rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd); + if (rc) + return -EIO; + + ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); + dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd, + ntb_topo_string(ndev->ntb.topo)); + if (ndev->ntb.topo == NTB_TOPO_NONE) + return -EINVAL; + + ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD; + + rc = gen3_init_ntb(ndev); + if (rc) + return rc; + + return gen3_init_isr(ndev); +} + +ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev; + void __iomem *mmio; + char *buf; + size_t buf_size; + ssize_t ret, off; + union { u64 v64; u32 v32; u16 v16; } u; + + ndev = filp->private_data; + mmio = ndev->self_mmio; + + buf_size = min(count, 0x800ul); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + off 
= 0; + + off += scnprintf(buf + off, buf_size - off, + "NTB Device Information:\n"); + + off += scnprintf(buf + off, buf_size - off, + "Connection Topology -\t%s\n", + ntb_topo_string(ndev->ntb.topo)); + + off += scnprintf(buf + off, buf_size - off, + "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); + off += scnprintf(buf + off, buf_size - off, + "LNK STA -\t\t%#06x\n", ndev->lnk_sta); + + if (!ndev->reg->link_is_up(ndev)) + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tDown\n"); + else { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tUp\n"); + off += scnprintf(buf + off, buf_size - off, + "Link Speed -\t\tPCI-E Gen %u\n", + NTB_LNK_STA_SPEED(ndev->lnk_sta)); + off += scnprintf(buf + off, buf_size - off, + "Link Width -\t\tx%u\n", + NTB_LNK_STA_WIDTH(ndev->lnk_sta)); + } + + off += scnprintf(buf + off, buf_size - off, + "Memory Window Count -\t%u\n", ndev->mw_count); + off += scnprintf(buf + off, buf_size - off, + "Scratchpad Count -\t%u\n", ndev->spad_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Count -\t%u\n", ndev->db_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); + + off += scnprintf(buf + off, buf_size - off, + "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask -\t\t%#llx\n", u.v64); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Bell -\t\t%#llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Incoming XLAT:\n"); + + u.v64 = ioread64(mmio + GEN3_IMBAR1XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR1XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_IMBAR2XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR2XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64); + + if (ntb_topo_is_b2b(ndev->ntb.topo)) { + off += scnprintf(buf + off, buf_size - off, + "\nNTB Outgoing B2B XLAT:\n"); + + u.v64 = ioread64(mmio + GEN3_EMBAR1XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR1XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_EMBAR2XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR2XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_EMBAR1XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR1XLMT -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_EMBAR2XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR2XLMT -\t\t%#018llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Secondary BAR:\n"); + + u.v64 = ioread64(mmio + GEN3_EMBAR0_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR0 -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_EMBAR1_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR1 
-\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN3_EMBAR2_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "EMBAR2 -\t\t%#018llx\n", u.v64); + } + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Statistics:\n"); + + u.v16 = ioread16(mmio + GEN3_USMEMMISS_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "Upstream Memory Miss -\t%u\n", u.v16); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Hardware Errors:\n"); + + if (!pci_read_config_word(ndev->ntb.pdev, + GEN3_DEVSTS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "DEVSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_word(ndev->ntb.pdev, + GEN3_LINK_STATUS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "LNKSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_dword(ndev->ntb.pdev, + GEN3_UNCERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "UNCERRSTS -\t\t%#06x\n", u.v32); + + if (!pci_read_config_dword(ndev->ntb.pdev, + GEN3_CORERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "CORERRSTS -\t\t%#06x\n", u.v32); + + ret = simple_read_from_buffer(ubuf, count, offp, buf, off); + kfree(buf); + return ret; +} + +int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed, + enum ntb_width max_width) +{ + struct intel_ntb_dev *ndev; + u32 ntb_ctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + dev_dbg(&ntb->pdev->dev, + "Enabling link with max_speed %d max_width %d\n", + max_speed, max_width); + + if (max_speed != NTB_SPEED_AUTO) + dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed); + if (max_width != NTB_WIDTH_AUTO) + dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width); + + ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); + ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); + ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP; + ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP; + iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); + + return 0; +} +static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + unsigned long xlat_reg, limit_reg; + resource_size_t bar_size, mw_size; + void __iomem *mmio; + u64 base, limit, reg_val; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + /* hardware requires that addr is aligned to bar size */ + if (addr & (bar_size - 1)) + return -EINVAL; + + /* make sure the range fits in the usable mw size */ + if (size > mw_size) + return -EINVAL; + + mmio = ndev->self_mmio; + xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); + limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); + base = pci_resource_start(ndev->ntb.pdev, bar); + + /* Set the limit if supported, if size is not mw_size */ + if (limit_reg && size != mw_size) + limit = base + size; + else + limit = base + mw_size; + + /* set and verify setting the translation address */ + iowrite64(addr, mmio + xlat_reg); + reg_val = ioread64(mmio + xlat_reg); + if (reg_val != addr) { + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); + + /* set and verify setting the limit */ + iowrite64(limit, mmio 
+ limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); + + /* setup the EP */ + limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; + base = ioread64(mmio + GEN3_EMBAR1_OFFSET + (8 * idx)); + base &= ~0xf; + + if (limit_reg && size != mw_size) + limit = base + size; + else + limit = base + mw_size; + + /* set and verify setting the limit */ + iowrite64(limit, mmio + limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); + + return 0; +} + +int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, + u64 *db_data, int db_bit) +{ + phys_addr_t db_addr_base; + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask)) + return -EINVAL; + + ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr, + ndev->peer_reg->db_bell); + + if (db_addr) { + *db_addr = db_addr_base + (db_bit * 4); + dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n", + *db_addr, db_bit); + } + + if (db_data) { + *db_data = 1; + dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n", + *db_data, db_bit); + } + + return 0; +} + +int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + int bit; + + if (db_bits & ~ndev->db_valid_mask) + return -EINVAL; + + while (db_bits) { + bit = __ffs(db_bits); + iowrite32(1, ndev->peer_mmio + + ndev->peer_reg->db_bell + (bit * 4)); + db_bits &= db_bits - 1; + } + + return 0; +} + +u64 intel_ntb3_db_read(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_read(ndev, + ndev->self_mmio + + ndev->self_reg->db_clear); +} + +int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + + return ndev_db_write(ndev, db_bits, + ndev->self_mmio + + ndev->self_reg->db_clear); +} + +const struct ntb_dev_ops intel_ntb3_ops = { + .mw_count = intel_ntb_mw_count, + .mw_get_align = intel_ntb_mw_get_align, + .mw_set_trans = intel_ntb3_mw_set_trans, + .peer_mw_count = intel_ntb_peer_mw_count, + .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, + .link_is_up = intel_ntb_link_is_up, + .link_enable = intel_ntb3_link_enable, + .link_disable = intel_ntb_link_disable, + .db_valid_mask = intel_ntb_db_valid_mask, + .db_vector_count = intel_ntb_db_vector_count, + .db_vector_mask = intel_ntb_db_vector_mask, + .db_read = intel_ntb3_db_read, + .db_clear = intel_ntb3_db_clear, + .db_set_mask = intel_ntb_db_set_mask, + .db_clear_mask = intel_ntb_db_clear_mask, + .peer_db_addr = intel_ntb3_peer_db_addr, + .peer_db_set = intel_ntb3_peer_db_set, + .spad_is_unsafe = intel_ntb_spad_is_unsafe, + .spad_count = intel_ntb_spad_count, + .spad_read = intel_ntb_spad_read, + .spad_write = intel_ntb_spad_write, + .peer_spad_addr = intel_ntb_peer_spad_addr, + .peer_spad_read = intel_ntb_peer_spad_read, + .peer_spad_write = intel_ntb_peer_spad_write, +}; + diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h new file mode 100644 index 0000000000..dea9398994 --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h @@ -0,0 
+1,118 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright(c) 2012-2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copy + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _NTB_INTEL_GEN3_H_ +#define _NTB_INTEL_GEN3_H_ + +#include "ntb_hw_intel.h" + +/* Intel Skylake Xeon hardware */ +#define GEN3_IMBAR1SZ_OFFSET 0x00d0 +#define GEN3_IMBAR2SZ_OFFSET 0x00d1 +#define GEN3_EMBAR1SZ_OFFSET 0x00d2 +#define GEN3_EMBAR2SZ_OFFSET 0x00d3 +#define GEN3_DEVCTRL_OFFSET 0x0098 +#define GEN3_DEVSTS_OFFSET 0x009a +#define GEN3_UNCERRSTS_OFFSET 0x014c +#define GEN3_CORERRSTS_OFFSET 0x0158 +#define GEN3_LINK_STATUS_OFFSET 0x01a2 + +#define GEN3_NTBCNTL_OFFSET 0x0000 +#define GEN3_IMBAR1XBASE_OFFSET 0x0010 /* SBAR2XLAT */ +#define GEN3_IMBAR1XLMT_OFFSET 0x0018 /* SBAR2LMT */ +#define GEN3_IMBAR2XBASE_OFFSET 0x0020 /* SBAR4XLAT */ +#define GEN3_IMBAR2XLMT_OFFSET 0x0028 /* SBAR4LMT */ +#define GEN3_IM_INT_STATUS_OFFSET 0x0040 +#define GEN3_IM_INT_DISABLE_OFFSET 0x0048 +#define GEN3_IM_SPAD_OFFSET 0x0080 /* SPAD */ +#define GEN3_USMEMMISS_OFFSET 0x0070 +#define GEN3_INTVEC_OFFSET 0x00d0 +#define GEN3_IM_DOORBELL_OFFSET 0x0100 /* SDOORBELL0 */ +#define GEN3_B2B_SPAD_OFFSET 0x0180 /* B2B SPAD */ +#define GEN3_EMBAR0XBASE_OFFSET 0x4008 /* B2B_XLAT */ +#define GEN3_EMBAR1XBASE_OFFSET 0x4010 /* PBAR2XLAT */ +#define GEN3_EMBAR1XLMT_OFFSET 0x4018 /* PBAR2LMT */ +#define GEN3_EMBAR2XBASE_OFFSET 0x4020 /* PBAR4XLAT */ +#define GEN3_EMBAR2XLMT_OFFSET 0x4028 /* PBAR4LMT */ +#define GEN3_EM_INT_STATUS_OFFSET 0x4040 +#define GEN3_EM_INT_DISABLE_OFFSET 0x4048 +#define GEN3_EM_SPAD_OFFSET 0x4080 /* remote SPAD */ +#define GEN3_EM_DOORBELL_OFFSET 0x4100 /* PDOORBELL0 */ +#define GEN3_SPCICMD_OFFSET 0x4504 /* SPCICMD */ +#define GEN3_EMBAR0_OFFSET 0x4510 /* SBAR0BASE */ +#define GEN3_EMBAR1_OFFSET 0x4518 /* SBAR23BASE */ +#define GEN3_EMBAR2_OFFSET 0x4520 /* SBAR45BASE */ + +#define GEN3_DB_COUNT 32 +#define GEN3_DB_LINK 32 +#define GEN3_DB_LINK_BIT BIT_ULL(GEN3_DB_LINK) +#define GEN3_DB_MSIX_VECTOR_COUNT 33 +#define GEN3_DB_MSIX_VECTOR_SHIFT 1 +#define GEN3_DB_TOTAL_SHIFT 33 +#define GEN3_SPAD_COUNT 16 + +static inline u64 gen3_db_ioread(const void __iomem *mmio) +{ + return ioread64(mmio); +} + +static inline void gen3_db_iowrite(u64 bits, void __iomem *mmio) +{ + iowrite64(bits, mmio); +} + +ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp); +int gen3_init_dev(struct intel_ntb_dev *ndev); +int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed, + enum ntb_width max_width); +u64 intel_ntb3_db_read(struct ntb_dev *ntb); +int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits); +int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits); +int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, + resource_size_t *db_size, + u64 *db_data, int db_bit); + +extern const struct ntb_dev_ops intel_ntb3_ops; + +#endif diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c new file mode 100644 index 0000000000..22cac7975b --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +/* Copyright(c) 2020 Intel Corporation. All rights reserved. 
*/ +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/ntb.h> +#include <linux/log2.h> + +#include "ntb_hw_intel.h" +#include "ntb_hw_gen1.h" +#include "ntb_hw_gen3.h" +#include "ntb_hw_gen4.h" + +static int gen4_poll_link(struct intel_ntb_dev *ndev); +static int gen4_link_is_up(struct intel_ntb_dev *ndev); + +static const struct intel_ntb_reg gen4_reg = { + .poll_link = gen4_poll_link, + .link_is_up = gen4_link_is_up, + .db_ioread = gen3_db_ioread, + .db_iowrite = gen3_db_iowrite, + .db_size = sizeof(u32), + .ntb_ctl = GEN4_NTBCNTL_OFFSET, + .mw_bar = {2, 4}, +}; + +static const struct intel_ntb_alt_reg gen4_pri_reg = { + .db_clear = GEN4_IM_INT_STATUS_OFFSET, + .db_mask = GEN4_IM_INT_DISABLE_OFFSET, + .spad = GEN4_IM_SPAD_OFFSET, +}; + +static const struct intel_ntb_xlat_reg gen4_sec_xlat = { + .bar2_limit = GEN4_IM23XLMT_OFFSET, + .bar2_xlat = GEN4_IM23XBASE_OFFSET, + .bar2_idx = GEN4_IM23XBASEIDX_OFFSET, +}; + +static const struct intel_ntb_alt_reg gen4_b2b_reg = { + .db_bell = GEN4_IM_DOORBELL_OFFSET, + .spad = GEN4_EM_SPAD_OFFSET, +}; + +static int gen4_poll_link(struct intel_ntb_dev *ndev) +{ + u16 reg_val; + + /* + * We need to write to the DLLSCS bit in SLOTSTS before we + * can clear the hardware link interrupt on ICX NTB. + */ + iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS); + ndev->reg->db_iowrite(ndev->db_link_mask, + ndev->self_mmio + + ndev->self_reg->db_clear); + + reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET); + if (reg_val == ndev->lnk_sta) + return 0; + + ndev->lnk_sta = reg_val; + + return 1; +} + +static int gen4_link_is_up(struct intel_ntb_dev *ndev) +{ + return NTB_LNK_STA_ACTIVE(ndev->lnk_sta); +} + +static int gen4_init_isr(struct intel_ntb_dev *ndev) +{ + int i; + + /* + * The MSIX vectors and the interrupt status bits are not lined up + * on Gen3 (Skylake) and Gen4. By default the link status bit is bit + * 32, but it is assigned to MSIX vector 0 by default. We need to fix + * up the mapping to line them up. The vector assignment at reset is + * 1-32,0; we reprogram it to 0-32.
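+ * + * Illustratively: before this fixup, doorbell 0 fires vector 1 and + * the link status bit (bit 32) fires vector 0; afterwards, status + * bit i fires vector i and the link status bit fires vector 32.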
+ */ + for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++) + iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i); + + return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT, + GEN4_DB_MSIX_VECTOR_COUNT, + GEN4_DB_MSIX_VECTOR_SHIFT, + GEN4_DB_TOTAL_SHIFT); +} + +static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev, + const struct intel_b2b_addr *addr, + const struct intel_b2b_addr *peer_addr) +{ + struct pci_dev *pdev; + void __iomem *mmio; + phys_addr_t bar_addr; + + pdev = ndev->ntb.pdev; + mmio = ndev->self_mmio; + + /* setup incoming bar limits == base addrs (zero length windows) */ + bar_addr = addr->bar2_addr64; + iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET); + bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET); + dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr); + + bar_addr = addr->bar4_addr64; + iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET); + bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET); + dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr); + + /* zero incoming translation addrs */ + iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET); + iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET); + + ndev->peer_mmio = ndev->self_mmio; + + return 0; +} + +static int gen4_init_ntb(struct intel_ntb_dev *ndev) +{ + int rc; + + + ndev->mw_count = XEON_MW_COUNT; + ndev->spad_count = GEN4_SPAD_COUNT; + ndev->db_count = GEN4_DB_COUNT; + ndev->db_link_mask = GEN4_DB_LINK_BIT; + + ndev->self_reg = &gen4_pri_reg; + ndev->xlat_reg = &gen4_sec_xlat; + ndev->peer_reg = &gen4_b2b_reg; + + if (ndev->ntb.topo == NTB_TOPO_B2B_USD) + rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr, + &xeon_b2b_usd_addr); + else + rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr, + &xeon_b2b_dsd_addr); + if (rc) + return rc; + + ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + + ndev->reg->db_iowrite(ndev->db_valid_mask, + ndev->self_mmio + + ndev->self_reg->db_mask); + + return 0; +} + +static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & GEN4_PPD_TOPO_MASK) { + case GEN4_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case GEN4_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + +static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & SPR_PPD_TOPO_MASK) { + case SPR_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case SPR_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + +int gen4_init_dev(struct intel_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + u32 ppd1/*, ppd0*/; + u16 lnkctl; + int rc; + + ndev->reg = &gen4_reg; + + if (pdev_is_ICX(pdev)) { + ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN; + ndev->hwerr_flags |= NTB_HWERR_LTR_BAD; + } + + ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); + if (pdev_is_ICX(pdev)) + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev)) + ndev->ntb.topo = spr_ppd_topo(ndev, ppd1); + dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, + ntb_topo_string(ndev->ntb.topo)); + if (ndev->ntb.topo == NTB_TOPO_NONE) + return -EINVAL; + + rc = gen4_init_ntb(ndev); + if (rc) + return rc; + + /* init link setup */ + lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE; + iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + + return gen4_init_isr(ndev); +} + +ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct intel_ntb_dev *ndev; + void __iomem *mmio; + char *buf; + size_t buf_size; + ssize_t ret, off; + union { 
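+ /* scratch space for register values read at 64/32/16-bit widths */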
u64 v64; u32 v32; u16 v16; } u; + + ndev = filp->private_data; + mmio = ndev->self_mmio; + + buf_size = min(count, 0x800ul); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + off = 0; + + off += scnprintf(buf + off, buf_size - off, + "NTB Device Information:\n"); + + off += scnprintf(buf + off, buf_size - off, + "Connection Topology -\t%s\n", + ntb_topo_string(ndev->ntb.topo)); + + off += scnprintf(buf + off, buf_size - off, + "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl); + off += scnprintf(buf + off, buf_size - off, + "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta); + + if (!ndev->reg->link_is_up(ndev)) + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tDown\n"); + else { + off += scnprintf(buf + off, buf_size - off, + "Link Status -\t\tUp\n"); + off += scnprintf(buf + off, buf_size - off, + "Link Speed -\t\tPCI-E Gen %u\n", + NTB_LNK_STA_SPEED(ndev->lnk_sta)); + off += scnprintf(buf + off, buf_size - off, + "Link Width -\t\tx%u\n", + NTB_LNK_STA_WIDTH(ndev->lnk_sta)); + } + + off += scnprintf(buf + off, buf_size - off, + "Memory Window Count -\t%u\n", ndev->mw_count); + off += scnprintf(buf + off, buf_size - off, + "Scratchpad Count -\t%u\n", ndev->spad_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Count -\t%u\n", ndev->db_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Count -\t%u\n", ndev->db_vec_count); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift); + + off += scnprintf(buf + off, buf_size - off, + "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask); + + u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask); + off += scnprintf(buf + off, buf_size - off, + "Doorbell Mask -\t\t%#llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Incoming XLAT:\n"); + + u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM23XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM45XBASE -\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM23XLMT -\t\t\t%#018llx\n", u.v64); + + u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "IM45XLMT -\t\t\t%#018llx\n", u.v64); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Statistics:\n"); + + off += scnprintf(buf + off, buf_size - off, + "\nNTB Hardware Errors:\n"); + + if (!pci_read_config_word(ndev->ntb.pdev, + GEN4_DEVSTS_OFFSET, &u.v16)) + off += scnprintf(buf + off, buf_size - off, + "DEVSTS -\t\t%#06x\n", u.v16); + + u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET); + off += scnprintf(buf + off, buf_size - off, + "LNKSTS -\t\t%#06x\n", u.v16); + + if (!pci_read_config_dword(ndev->ntb.pdev, + GEN4_UNCERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "UNCERRSTS -\t\t%#06x\n", u.v32); + + if (!pci_read_config_dword(ndev->ntb.pdev, + GEN4_CORERRSTS_OFFSET, &u.v32)) + off += scnprintf(buf + off, buf_size - off, + "CORERRSTS -\t\t%#06x\n", u.v32); + + ret = simple_read_from_buffer(ubuf, count, offp, buf, off); + kfree(buf); + return ret; +} + +static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, + dma_addr_t addr, 
resource_size_t size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + unsigned long xlat_reg, limit_reg, idx_reg; + unsigned short base_idx, reg_val16; + resource_size_t bar_size, mw_size; + void __iomem *mmio; + u64 base, limit, reg_val; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) { + /* hardware requires that addr is aligned to bar size */ + if (addr & (bar_size - 1)) + return -EINVAL; + } else { + if (addr & (PAGE_SIZE - 1)) + return -EINVAL; + } + + /* make sure the range fits in the usable mw size */ + if (size > mw_size) + return -EINVAL; + + mmio = ndev->self_mmio; + xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10); + limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10); + base = pci_resource_start(ndev->ntb.pdev, bar); + + /* Set the limit if supported, if size is not mw_size */ + if (limit_reg && size != mw_size) { + limit = base + size; + base_idx = __ilog2_u64(size); + } else { + limit = base + mw_size; + base_idx = __ilog2_u64(mw_size); + } + + + /* set and verify setting the translation address */ + iowrite64(addr, mmio + xlat_reg); + reg_val = ioread64(mmio + xlat_reg); + if (reg_val != addr) { + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val); + + /* set and verify setting the limit */ + iowrite64(limit, mmio + limit_reg); + reg_val = ioread64(mmio + limit_reg); + if (reg_val != limit) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + return -EIO; + } + + dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val); + + if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) { + idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2); + iowrite16(base_idx, mmio + idx_reg); + reg_val16 = ioread16(mmio + idx_reg); + if (reg_val16 != base_idx) { + iowrite64(base, mmio + limit_reg); + iowrite64(0, mmio + xlat_reg); + iowrite16(0, mmio + idx_reg); + return -EIO; + } + dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16); + } + + + return 0; +} + +static int intel_ntb4_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, enum ntb_width max_width) +{ + struct intel_ntb_dev *ndev; + u32 ntb_ctl, ppd0; + u16 lnkctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + dev_dbg(&ntb->pdev->dev, + "Enabling link with max_speed %d max_width %d\n", + max_speed, max_width); + + if (max_speed != NTB_SPEED_AUTO) + dev_dbg(&ntb->pdev->dev, + "ignoring max_speed %d\n", max_speed); + if (max_width != NTB_WIDTH_AUTO) + dev_dbg(&ntb->pdev->dev, + "ignoring max_width %d\n", max_width); + + if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) { + u32 ltr; + + /* Setup active snoop LTR values */ + ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL | NTB_LTR_ACTIVE_LATSCALE; + /* Setup active non-snoop values */ + ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr; + iowrite32(ltr, ndev->self_mmio + GEN4_LTR_ACTIVE_OFFSET); + + /* Setup idle snoop LTR values */ + ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT; + /* Setup idle non-snoop values */ + ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr; + iowrite32(ltr, ndev->self_mmio + GEN4_LTR_IDLE_OFFSET); + + /* setup PCIe LTR to active */ + iowrite8(NTB_LTR_SWSEL_ACTIVE, ndev->self_mmio + 
GEN4_LTR_SWSEL_OFFSET); + } + + ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP; + ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP; + iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl); + + lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE; + iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + + /* start link training in PPD0 */ + ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET); + ppd0 |= GEN4_PPD_LINKTRN; + iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET); + + /* make sure link training has started */ + ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET); + if (!(ppd0 & GEN4_PPD_LINKTRN)) { + dev_warn(&ntb->pdev->dev, "Link is not training\n"); + return -ENXIO; + } + + ndev->dev_up = 1; + + return 0; +} + +static int intel_ntb4_link_disable(struct ntb_dev *ntb) +{ + struct intel_ntb_dev *ndev; + u32 ntb_cntl; + u16 lnkctl; + + ndev = container_of(ntb, struct intel_ntb_dev, ntb); + + dev_dbg(&ntb->pdev->dev, "Disabling link\n"); + + /* clear the snoop bits */ + ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); + ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP); + ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP); + iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl); + + lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE; + iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET); + + /* set LTR to idle */ + if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) + iowrite8(NTB_LTR_SWSEL_IDLE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET); + + ndev->dev_up = 0; + + return 0; +} + +static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + resource_size_t bar_size, mw_size; + int bar; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + bar_size = pci_resource_len(ndev->ntb.pdev, bar); + + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + if (addr_align) { + if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) + *addr_align = pci_resource_len(ndev->ntb.pdev, bar); + else + *addr_align = PAGE_SIZE; + } + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = mw_size; + + return 0; +} + +const struct ntb_dev_ops intel_ntb4_ops = { + .mw_count = intel_ntb_mw_count, + .mw_get_align = intel_ntb4_mw_get_align, + .mw_set_trans = intel_ntb4_mw_set_trans, + .peer_mw_count = intel_ntb_peer_mw_count, + .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, + .link_is_up = intel_ntb_link_is_up, + .link_enable = intel_ntb4_link_enable, + .link_disable = intel_ntb4_link_disable, + .db_valid_mask = intel_ntb_db_valid_mask, + .db_vector_count = intel_ntb_db_vector_count, + .db_vector_mask = intel_ntb_db_vector_mask, + .db_read = intel_ntb3_db_read, + .db_clear = intel_ntb3_db_clear, + .db_set_mask = intel_ntb_db_set_mask, + .db_clear_mask = intel_ntb_db_clear_mask, + .peer_db_addr = intel_ntb3_peer_db_addr, + .peer_db_set = intel_ntb3_peer_db_set, + .spad_is_unsafe = intel_ntb_spad_is_unsafe, + .spad_count = intel_ntb_spad_count, + .spad_read = intel_ntb_spad_read, + .spad_write = intel_ntb_spad_write, + .peer_spad_addr = intel_ntb_peer_spad_addr, + .peer_spad_read = intel_ntb_peer_spad_read, + 
.peer_spad_write = intel_ntb_peer_spad_write, +}; + diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h new file mode 100644 index 0000000000..f91323eaf5 --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* Copyright(c) 2020 Intel Corporation. All rights reserved. */ +#ifndef _NTB_INTEL_GEN4_H_ +#define _NTB_INTEL_GEN4_H_ + +#include "ntb_hw_intel.h" + +/* Supported PCI device revision range for ICX */ +#define PCI_DEVICE_REVISION_ICX_MIN 0x2 +#define PCI_DEVICE_REVISION_ICX_MAX 0xF + +/* Intel Gen4 NTB hardware */ +/* PCIe config space */ +#define GEN4_IMBAR23SZ_OFFSET 0x00c4 +#define GEN4_IMBAR45SZ_OFFSET 0x00c5 +#define GEN4_EMBAR23SZ_OFFSET 0x00c6 +#define GEN4_EMBAR45SZ_OFFSET 0x00c7 +#define GEN4_DEVCTRL_OFFSET 0x0048 +#define GEN4_DEVSTS_OFFSET 0x004a +#define GEN4_UNCERRSTS_OFFSET 0x0104 +#define GEN4_CORERRSTS_OFFSET 0x0110 + +/* BAR0 MMIO */ +#define GEN4_NTBCNTL_OFFSET 0x0000 +#define GEN4_IM23XBASE_OFFSET 0x0010 /* IMBAR1XBASE */ +#define GEN4_IM23XLMT_OFFSET 0x0018 /* IMBAR1XLMT */ +#define GEN4_IM45XBASE_OFFSET 0x0020 /* IMBAR2XBASE */ +#define GEN4_IM45XLMT_OFFSET 0x0028 /* IMBAR2XLMT */ +#define GEN4_IM_INT_STATUS_OFFSET 0x0040 +#define GEN4_IM_INT_DISABLE_OFFSET 0x0048 +#define GEN4_INTVEC_OFFSET 0x0050 /* 0-32 vecs */ +#define GEN4_IM23XBASEIDX_OFFSET 0x0074 +#define GEN4_IM45XBASEIDX_OFFSET 0x0076 +#define GEN4_IM_SPAD_OFFSET 0x0080 /* 0-15 SPADs */ +#define GEN4_IM_SPAD_SEM_OFFSET 0x00c0 /* SPAD hw semaphore */ +#define GEN4_IM_SPAD_STICKY_OFFSET 0x00c4 /* sticky SPAD */ +#define GEN4_IM_DOORBELL_OFFSET 0x0100 /* 0-31 doorbells */ +#define GEN4_LTR_SWSEL_OFFSET 0x30ec +#define GEN4_LTR_ACTIVE_OFFSET 0x30f0 +#define GEN4_LTR_IDLE_OFFSET 0x30f4 +#define GEN4_EM_SPAD_OFFSET 0x8080 +/* note, link status is now in MMIO and not config space for NTB */ +#define GEN4_LINK_CTRL_OFFSET 0xb050 +#define GEN4_LINK_STATUS_OFFSET 0xb052 +#define GEN4_PPD0_OFFSET 0xb0d4 +#define GEN4_PPD1_OFFSET 0xb4c0 +#define GEN4_LTSSMSTATEJMP 0xf040 + +#define GEN4_PPD_CLEAR_TRN 0x0001 +#define GEN4_PPD_LINKTRN 0x0008 +#define GEN4_PPD_CONN_MASK 0x0300 +#define SPR_PPD_CONN_MASK 0x0700 +#define GEN4_PPD_CONN_B2B 0x0200 +#define GEN4_PPD_DEV_MASK 0x1000 +#define GEN4_PPD_DEV_DSD 0x1000 +#define GEN4_PPD_DEV_USD 0x0000 +#define SPR_PPD_DEV_MASK 0x4000 +#define SPR_PPD_DEV_DSD 0x4000 +#define SPR_PPD_DEV_USD 0x0000 +#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 + +#define GEN4_SLOTSTS 0xb05a +#define GEN4_SLOTSTS_DLLSCS 0x100 + +#define GEN4_PPD_TOPO_MASK (GEN4_PPD_CONN_MASK | GEN4_PPD_DEV_MASK) +#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) +#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) + +#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK) +#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD) +#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD) + +#define GEN4_DB_COUNT 32 +#define GEN4_DB_LINK 32 +#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) +#define GEN4_DB_MSIX_VECTOR_COUNT 33 +#define GEN4_DB_MSIX_VECTOR_SHIFT 1 +#define GEN4_DB_TOTAL_SHIFT 33 +#define GEN4_SPAD_COUNT 16 + +#define NTB_CTL_E2I_BAR23_SNOOP 0x000004 +#define NTB_CTL_E2I_BAR23_NOSNOOP 0x000008 +#define NTB_CTL_I2E_BAR23_SNOOP 0x000010 +#define NTB_CTL_I2E_BAR23_NOSNOOP 0x000020 +#define NTB_CTL_E2I_BAR45_SNOOP 0x000040 +#define NTB_CTL_E2I_BAR45_NOSNOOP 0x000080 +#define NTB_CTL_I2E_BAR45_SNOOP 0x000100 +#define NTB_CTL_I2E_BAR45_NOSNOOP 0x000200 +#define NTB_CTL_BUSNO_DIS_INC 0x000400 +#define NTB_CTL_LINK_DOWN 0x010000 + +#define NTB_SJC_FORCEDETECT 0x000004 + +#define NTB_LTR_SWSEL_ACTIVE 0x0 +#define NTB_LTR_SWSEL_IDLE 0x1 + +#define NTB_LTR_NS_SHIFT 16 +#define NTB_LTR_ACTIVE_VAL 0x0000 /* 0 us */ +#define NTB_LTR_ACTIVE_LATSCALE 0x0800 /* 1us scale */ +#define NTB_LTR_ACTIVE_REQMNT 0x8000 /* snoop req enable */ + +#define NTB_LTR_IDLE_VAL 0x0258 /* 600 us */ +#define NTB_LTR_IDLE_LATSCALE 0x0800 /* 1us scale */ +#define NTB_LTR_IDLE_REQMNT 0x8000 /* snoop req enable */ + +ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp); +int gen4_init_dev(struct intel_ntb_dev *ndev); + +extern const struct ntb_dev_ops intel_ntb4_ops; + +static inline int pdev_is_ICX(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision >= PCI_DEVICE_REVISION_ICX_MIN && + pdev->revision <= PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + +static inline int pdev_is_SPR(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision > PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + +#endif diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h new file mode 100644 index 0000000000..da4d5fe55b --- /dev/null +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h @@ -0,0 +1,238 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 Intel Corporation. All rights reserved. + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * BSD LICENSE + * + * Copyright(c) 2012 Intel Corporation. All rights reserved. + * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Intel PCIe NTB Linux driver + */ + +#ifndef NTB_HW_INTEL_H +#define NTB_HW_INTEL_H + +#include <linux/ntb.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +/* PCI device IDs */ +#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 +#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726 +#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727 +#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB 0x3C0D +#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E +#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F +#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT 0x0E0D +#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E +#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F +#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D +#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E +#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F +#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D +#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E +#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F +#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C +#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e +#define PCI_DEVICE_ID_INTEL_NTB_B2B_GNR 0x0db4 + +/* Ntb control and link status */ +#define NTB_CTL_CFG_LOCK BIT(0) +#define NTB_CTL_DISABLE BIT(1) +#define NTB_CTL_S2P_BAR2_SNOOP BIT(2) +#define NTB_CTL_P2S_BAR2_SNOOP BIT(4) +#define NTB_CTL_S2P_BAR4_SNOOP BIT(6) +#define NTB_CTL_P2S_BAR4_SNOOP BIT(8) +#define NTB_CTL_S2P_BAR5_SNOOP BIT(12) +#define NTB_CTL_P2S_BAR5_SNOOP BIT(14) + +#define NTB_LNK_STA_ACTIVE_BIT 0x2000 +#define NTB_LNK_STA_SPEED_MASK 0x000f +#define NTB_LNK_STA_WIDTH_MASK 0x03f0 +#define NTB_LNK_STA_ACTIVE(x) (!!((x) & NTB_LNK_STA_ACTIVE_BIT)) +#define NTB_LNK_STA_SPEED(x) ((x) & NTB_LNK_STA_SPEED_MASK) +#define NTB_LNK_STA_WIDTH(x) (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4) + +/* flags to indicate unsafe api */ +#define NTB_UNSAFE_DB BIT_ULL(0) +#define NTB_UNSAFE_SPAD BIT_ULL(1) + +#define NTB_BAR_MASK_64 ~(0xfull) +#define NTB_BAR_MASK_32 ~(0xfu) + +struct intel_ntb_dev; + +struct intel_ntb_reg { + int (*poll_link)(struct intel_ntb_dev *ndev); + int (*link_is_up)(struct intel_ntb_dev *ndev); + u64 (*db_ioread)(const void __iomem *mmio); + void (*db_iowrite)(u64 db_bits, void __iomem *mmio); + unsigned long ntb_ctl; + resource_size_t db_size; + int mw_bar[]; +}; + +struct intel_ntb_alt_reg { + unsigned long db_bell; + unsigned long db_mask; + unsigned long db_clear; + unsigned long spad; +}; + +struct intel_ntb_xlat_reg { + unsigned long bar0_base; + unsigned long bar2_xlat; + unsigned long bar2_limit; + unsigned short bar2_idx; +}; + +struct intel_b2b_addr { + phys_addr_t bar0_addr; + phys_addr_t bar2_addr64; + phys_addr_t bar4_addr64; + phys_addr_t bar4_addr32; + phys_addr_t bar5_addr32; +}; + +struct intel_ntb_vec { + struct intel_ntb_dev *ndev; + int num; +}; + +struct intel_ntb_dev { + struct ntb_dev ntb; + + /* offset of peer bar0 in b2b bar */ + unsigned long b2b_off; + /* mw idx used to access peer bar0 */ + unsigned int b2b_idx; + + /* BAR45 is split into BAR4 and BAR5 */ + bool bar4_split; + + u32 ntb_ctl; + u32 lnk_sta; + + unsigned char mw_count; + unsigned char spad_count; + unsigned char
db_count; + unsigned char db_vec_count; + unsigned char db_vec_shift; + + u64 db_valid_mask; + u64 db_link_mask; + u64 db_mask; + + /* synchronize rmw access of db_mask and hw reg */ + spinlock_t db_mask_lock; + + struct msix_entry *msix; + struct intel_ntb_vec *vec; + + const struct intel_ntb_reg *reg; + const struct intel_ntb_alt_reg *self_reg; + const struct intel_ntb_alt_reg *peer_reg; + const struct intel_ntb_xlat_reg *xlat_reg; + void __iomem *self_mmio; + void __iomem *peer_mmio; + phys_addr_t peer_addr; + + unsigned long last_ts; + struct delayed_work hb_timer; + + unsigned long hwerr_flags; + unsigned long unsafe_flags; + unsigned long unsafe_flags_ignore; + + struct dentry *debugfs_dir; + struct dentry *debugfs_info; + + /* gen4 entries */ + int dev_up; +}; + +#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb) +#define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \ + hb_timer.work) + +static inline int pdev_is_gen1(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_NTB_SS_JSF: + case PCI_DEVICE_ID_INTEL_NTB_SS_SNB: + case PCI_DEVICE_ID_INTEL_NTB_SS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_SS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_SS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_PS_JSF: + case PCI_DEVICE_ID_INTEL_NTB_PS_SNB: + case PCI_DEVICE_ID_INTEL_NTB_PS_IVT: + case PCI_DEVICE_ID_INTEL_NTB_PS_HSX: + case PCI_DEVICE_ID_INTEL_NTB_PS_BDX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF: + case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB: + case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT: + case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX: + case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX: + return 1; + } + return 0; +} + +static inline int pdev_is_gen3(struct pci_dev *pdev) +{ + if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX) + return 1; + + return 0; +} + +static inline int pdev_is_gen4(struct pci_dev *pdev) +{ + if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_ICX) + return 1; + + return 0; +} + +static inline int pdev_is_gen5(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_GNR; +} + +#endif diff --git a/drivers/ntb/hw/mscc/Kconfig b/drivers/ntb/hw/mscc/Kconfig new file mode 100644 index 0000000000..ea63bd829d --- /dev/null +++ b/drivers/ntb/hw/mscc/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NTB_SWITCHTEC + tristate "MicroSemi Switchtec Non-Transparent Bridge Support" + select PCI_SW_SWITCHTEC + help + Enables NTB support for Switchtec PCI switches. This also + selects the Switchtec management driver as they share the same + hardware interface. + + If unsure, say N. 
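+ +# The ntb_hw_switchtec module built from this directory also exposes +# the max_mw_size and use_lut_mws module parameters (see +# ntb_hw_switchtec.c).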
diff --git a/drivers/ntb/hw/mscc/Makefile b/drivers/ntb/hw/mscc/Makefile new file mode 100644 index 0000000000..756a75abee --- /dev/null +++ b/drivers/ntb/hw/mscc/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c new file mode 100644 index 0000000000..d6bbcc7b5b --- /dev/null +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -0,0 +1,1577 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2017, Microsemi Corporation + */ + +#include <linux/interrupt.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/ntb.h> +#include <linux/pci.h> +#include <linux/switchtec.h> + +MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver"); +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Microsemi Corporation"); + +static ulong max_mw_size = SZ_2M; +module_param(max_mw_size, ulong, 0644); +MODULE_PARM_DESC(max_mw_size, + "Max memory window size reported to the upper layer"); + +static bool use_lut_mws; +module_param(use_lut_mws, bool, 0644); +MODULE_PARM_DESC(use_lut_mws, + "Enable the use of the LUT based memory windows"); + +#define SWITCHTEC_NTB_MAGIC 0x45CC0001 +#define MAX_MWS 128 + +struct shared_mw { + u32 magic; + u32 link_sta; + u32 partition_id; + u64 mw_sizes[MAX_MWS]; + u32 spad[128]; +}; + +#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry) +#define LUT_SIZE SZ_64K + +struct switchtec_ntb { + struct ntb_dev ntb; + struct switchtec_dev *stdev; + + int self_partition; + int peer_partition; + + int doorbell_irq; + int message_irq; + + struct ntb_info_regs __iomem *mmio_ntb; + struct ntb_ctrl_regs __iomem *mmio_ctrl; + struct ntb_dbmsg_regs __iomem *mmio_dbmsg; + struct ntb_ctrl_regs __iomem *mmio_self_ctrl; + struct ntb_ctrl_regs __iomem *mmio_peer_ctrl; + struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg; + struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg; + + void __iomem *mmio_xlink_win; + + struct shared_mw *self_shared; + struct shared_mw __iomem *peer_shared; + dma_addr_t self_shared_dma; + + u64 db_mask; + u64 db_valid_mask; + int db_shift; + int db_peer_shift; + + /* synchronize rmw access of db_mask and hw reg */ + spinlock_t db_mask_lock; + + int nr_direct_mw; + int nr_lut_mw; + int nr_rsvd_luts; + int direct_mw_to_bar[MAX_DIRECT_MW]; + + int peer_nr_direct_mw; + int peer_nr_lut_mw; + int peer_direct_mw_to_bar[MAX_DIRECT_MW]; + + bool link_is_up; + enum ntb_speed link_speed; + enum ntb_width link_width; + struct work_struct check_link_status_work; + bool link_force_down; +}; + +static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb) +{ + return container_of(ntb, struct switchtec_ntb, ntb); +} + +static int switchtec_ntb_part_op(struct switchtec_ntb *sndev, + struct ntb_ctrl_regs __iomem *ctl, + u32 op, int wait_status) +{ + static const char * const op_text[] = { + [NTB_CTRL_PART_OP_LOCK] = "lock", + [NTB_CTRL_PART_OP_CFG] = "configure", + [NTB_CTRL_PART_OP_RESET] = "reset", + }; + + int i; + u32 ps; + int status; + + switch (op) { + case NTB_CTRL_PART_OP_LOCK: + status = NTB_CTRL_PART_STATUS_LOCKING; + break; + case NTB_CTRL_PART_OP_CFG: + status = NTB_CTRL_PART_STATUS_CONFIGURING; + break; + case NTB_CTRL_PART_OP_RESET: + status = NTB_CTRL_PART_STATUS_RESETTING; + break; + default: + return -EINVAL; + } + + iowrite32(op, &ctl->partition_op); + + for (i = 0; i < 1000; i++) { + if (msleep_interruptible(50) != 0) { + iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op); + return -EINTR; + }
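+ /* poll the low 16 bits of partition_status until the op leaves its transient state */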
+ + ps = ioread32(&ctl->partition_status) & 0xFFFF; + + if (ps != status) + break; + } + + if (ps == wait_status) + return 0; + + if (ps == status) { + dev_err(&sndev->stdev->dev, + "Timed out while performing %s (%d). (%08x)\n", + op_text[op], op, + ioread32(&ctl->partition_status)); + + return -ETIMEDOUT; + } + + return -EIO; +} + +static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx, + u32 val) +{ + if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg)) + return -EINVAL; + + iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg); + + return 0; +} + +static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + int nr_direct_mw = sndev->peer_nr_direct_mw; + int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (!use_lut_mws) + nr_lut_mw = 0; + + return nr_direct_mw + nr_lut_mw; +} + +static int lut_index(struct switchtec_ntb *sndev, int mw_idx) +{ + return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts; +} + +static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx) +{ + return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts; +} + +static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, + int widx, resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + int lut; + resource_size_t size; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + lut = widx >= sndev->peer_nr_direct_mw; + size = ioread64(&sndev->peer_shared->mw_sizes[widx]); + + if (size == 0) + return -EINVAL; + + if (addr_align) + *addr_align = lut ? size : SZ_4K; + + if (size_align) + *size_align = lut ? size : SZ_4K; + + if (size_max) + *size_max = size; + + return 0; +} + +static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + int bar = sndev->peer_direct_mw_to_bar[idx]; + u32 ctl_val; + + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); + ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN; + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); + iowrite32(0, &ctl->bar_entry[bar].win_size); + iowrite32(0, &ctl->bar_ext_entry[bar].win_size); + iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr); +} + +static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + + iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]); +} + +static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx, + dma_addr_t addr, resource_size_t size) +{ + int xlate_pos = ilog2(size); + int bar = sndev->peer_direct_mw_to_bar[idx]; + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + u32 ctl_val; + + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); + ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; + + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); + iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000), + &ctl->bar_entry[bar].win_size); + iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size); + iowrite64(sndev->self_partition | addr, + &ctl->bar_entry[bar].xlate_addr); +} + +static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + + iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr), + &ctl->lut_entry[peer_lut_index(sndev, idx)]); +} + +static int 
switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, + dma_addr_t addr, resource_size_t size) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + int xlate_pos = ilog2(size); + int nr_direct_mw = sndev->peer_nr_direct_mw; + int rc; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n", + widx, pidx, &addr, &size); + + if (widx >= switchtec_ntb_mw_count(ntb, pidx)) + return -EINVAL; + + if (size != 0 && xlate_pos < 12) + return -EINVAL; + + if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { + /* + * In certain circumstances we can get a buffer that is + * not aligned to its size. (Most of the time + * dma_alloc_coherent() ensures this.) This can happen when + * using large buffers allocated by the CMA + * (see CONFIG_CMA_ALIGNMENT). + */ + dev_err(&sndev->stdev->dev, + "ERROR: Memory window address is not aligned to its size!\n"); + return -EINVAL; + } + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + return rc; + + if (size == 0) { + if (widx < nr_direct_mw) + switchtec_ntb_mw_clr_direct(sndev, widx); + else + switchtec_ntb_mw_clr_lut(sndev, widx); + } else { + if (widx < nr_direct_mw) + switchtec_ntb_mw_set_direct(sndev, widx, addr, size); + else + switchtec_ntb_mw_set_lut(sndev, widx, addr, size); + } + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + + if (rc == -EIO) { + dev_err(&sndev->stdev->dev, + "Hardware reported an error configuring mw %d: %08x\n", + widx, ioread32(&ctl->bar_error)); + + if (widx < nr_direct_mw) + switchtec_ntb_mw_clr_direct(sndev, widx); + else + switchtec_ntb_mw_clr_lut(sndev, widx); + + switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + } + + return rc; +} + +static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts; + + return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0); +} + +static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev, + int idx, phys_addr_t *base, + resource_size_t *size) +{ + int bar = sndev->direct_mw_to_bar[idx]; + size_t offset = 0; + + if (bar < 0) + return -EINVAL; + + if (idx == 0) { + /* + * This is the direct BAR shared with the LUTs + * which means the actual window will be offset + * by the size of all the LUT entries.
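+ * + * For example (illustrative): with the 64K LUT_SIZE used here and + * 8 LUT windows, the usable direct window would begin 512K into + * the BAR.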
+ */ + + offset = LUT_SIZE * sndev->nr_lut_mw; + } + + if (base) + *base = pci_resource_start(sndev->ntb.pdev, bar) + offset; + + if (size) { + *size = pci_resource_len(sndev->ntb.pdev, bar) - offset; + if (offset && *size > offset) + *size = offset; + + if (*size > max_mw_size) + *size = max_mw_size; + } + + return 0; +} + +static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev, + int idx, phys_addr_t *base, + resource_size_t *size) +{ + int bar = sndev->direct_mw_to_bar[0]; + int offset; + + offset = LUT_SIZE * lut_index(sndev, idx); + + if (base) + *base = pci_resource_start(sndev->ntb.pdev, bar) + offset; + + if (size) + *size = LUT_SIZE; + + return 0; +} + +static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, + resource_size_t *size) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (idx < sndev->nr_direct_mw) + return switchtec_ntb_direct_get_addr(sndev, idx, base, size); + else if (idx < switchtec_ntb_peer_mw_count(ntb)) + return switchtec_ntb_lut_get_addr(sndev, idx, base, size); + else + return -EINVAL; +} + +static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev, + int partition, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct switchtec_dev *stdev = sndev->stdev; + struct part_cfg_regs __iomem *part_cfg = + &stdev->mmio_part_cfg_all[partition]; + + u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF; + u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]); + + if (speed) + *speed = (linksta >> 16) & 0xF; + + if (width) + *width = (linksta >> 20) & 0x3F; +} + +static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev) +{ + enum ntb_speed self_speed, peer_speed; + enum ntb_width self_width, peer_width; + + if (!sndev->link_is_up) { + sndev->link_speed = NTB_SPEED_NONE; + sndev->link_width = NTB_WIDTH_NONE; + return; + } + + switchtec_ntb_part_link_speed(sndev, sndev->self_partition, + &self_speed, &self_width); + switchtec_ntb_part_link_speed(sndev, sndev->peer_partition, + &peer_speed, &peer_width); + + sndev->link_speed = min(self_speed, peer_speed); + sndev->link_width = min(self_width, peer_width); +} + +static int crosslink_is_enabled(struct switchtec_ntb *sndev) +{ + struct ntb_info_regs __iomem *inf = sndev->mmio_ntb; + + return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled); +} + +static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev) +{ + int i; + u32 msg_map = 0; + + if (!crosslink_is_enabled(sndev)) + return; + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) { + int m = i | sndev->self_partition << 2; + + msg_map |= m << i * 8; + } + + iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map); + iowrite64(sndev->db_valid_mask << sndev->db_peer_shift, + &sndev->mmio_peer_dbmsg->odb_mask); +} + +enum switchtec_msg { + LINK_MESSAGE = 0, + MSG_LINK_UP = 1, + MSG_LINK_DOWN = 2, + MSG_CHECK_LINK = 3, + MSG_LINK_FORCE_DOWN = 4, +}; + +static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev); + +static void switchtec_ntb_link_status_update(struct switchtec_ntb *sndev) +{ + int link_sta; + int old = sndev->link_is_up; + + link_sta = sndev->self_shared->link_sta; + if (link_sta) { + u64 peer = ioread64(&sndev->peer_shared->magic); + + if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC) + link_sta = peer >> 32; + else + link_sta = 0; + } + + sndev->link_is_up = link_sta; + switchtec_ntb_set_link_speed(sndev); + + if (link_sta != old) { + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK); + ntb_link_event(&sndev->ntb); + 
dev_info(&sndev->stdev->dev, "ntb link %s\n", + link_sta ? "up" : "down"); + + if (link_sta) + crosslink_init_dbmsgs(sndev); + } +} + +static void check_link_status_work(struct work_struct *work) +{ + struct switchtec_ntb *sndev; + + sndev = container_of(work, struct switchtec_ntb, + check_link_status_work); + + if (sndev->link_force_down) { + sndev->link_force_down = false; + switchtec_ntb_reinit_peer(sndev); + + if (sndev->link_is_up) { + sndev->link_is_up = 0; + ntb_link_event(&sndev->ntb); + dev_info(&sndev->stdev->dev, "ntb link forced down\n"); + } + + return; + } + + switchtec_ntb_link_status_update(sndev); +} + +static void switchtec_ntb_check_link(struct switchtec_ntb *sndev, + enum switchtec_msg msg) +{ + if (msg == MSG_LINK_FORCE_DOWN) + sndev->link_force_down = true; + + schedule_work(&sndev->check_link_status_work); +} + +static void switchtec_ntb_link_notification(struct switchtec_dev *stdev) +{ + struct switchtec_ntb *sndev = stdev->sndev; + + switchtec_ntb_check_link(sndev, MSG_CHECK_LINK); +} + +static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (speed) + *speed = sndev->link_speed; + if (width) + *width = sndev->link_width; + + return sndev->link_is_up; +} + +static int switchtec_ntb_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + dev_dbg(&sndev->stdev->dev, "enabling link\n"); + + sndev->self_shared->link_sta = 1; + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP); + + switchtec_ntb_link_status_update(sndev); + + return 0; +} + +static int switchtec_ntb_link_disable(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + dev_dbg(&sndev->stdev->dev, "disabling link\n"); + + sndev->self_shared->link_sta = 0; + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN); + + switchtec_ntb_link_status_update(sndev); + + return 0; +} + +static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return sndev->db_valid_mask; +} + +static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb) +{ + return 1; +} + +static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (db_vector < 0 || db_vector > 1) + return 0; + + return sndev->db_valid_mask; +} + +static u64 switchtec_ntb_db_read(struct ntb_dev *ntb) +{ + u64 ret; + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift; + + return ret & sndev->db_valid_mask; +} + +static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb); + + return 0; +} + +static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + unsigned long irqflags; + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (db_bits & ~sndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&sndev->db_mask_lock, irqflags); + + sndev->db_mask |= db_bits << sndev->db_shift; + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); + + spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); + + return 0; +} + +static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + unsigned long irqflags; + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (db_bits & 
~sndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&sndev->db_mask_lock, irqflags); + + sndev->db_mask &= ~(db_bits << sndev->db_shift); + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); + + spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); + + return 0; +} + +static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask; +} + +static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, + phys_addr_t *db_addr, + resource_size_t *db_size, + u64 *db_data, + int db_bit) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + unsigned long offset; + + if (unlikely(db_bit >= BITS_PER_LONG_LONG)) + return -EINVAL; + + offset = (unsigned long)&sndev->mmio_peer_dbmsg->odb - + (unsigned long)sndev->stdev->mmio; + + offset += sndev->db_shift / 8; + + if (db_addr) + *db_addr = pci_resource_start(ntb->pdev, 0) + offset; + if (db_size) + *db_size = sizeof(u32); + if (db_data) + *db_data = BIT_ULL(db_bit) << sndev->db_peer_shift; + + return 0; +} + +static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + iowrite64(db_bits << sndev->db_peer_shift, + &sndev->mmio_peer_dbmsg->odb); + + return 0; +} + +static int switchtec_ntb_spad_count(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return ARRAY_SIZE(sndev->self_shared->spad); +} + +static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) + return 0; + + if (!sndev->self_shared) + return 0; + + return sndev->self_shared->spad[idx]; +} + +static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) + return -EINVAL; + + if (!sndev->self_shared) + return -EIO; + + sndev->self_shared->spad[idx] = val; + + return 0; +} + +static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, + int sidx) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) + return 0; + + if (!sndev->peer_shared) + return 0; + + return ioread32(&sndev->peer_shared->spad[sidx]); +} + +static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, + int sidx, u32 val) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) + return -EINVAL; + + if (!sndev->peer_shared) + return -EIO; + + iowrite32(val, &sndev->peer_shared->spad[sidx]); + + return 0; +} + +static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, + int sidx, phys_addr_t *spad_addr) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + unsigned long offset; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + offset = (unsigned long)&sndev->peer_shared->spad[sidx] - + (unsigned long)sndev->stdev->mmio; + + if (spad_addr) + *spad_addr = pci_resource_start(ntb->pdev, 0) + offset; + + return 0; +} + +static const struct ntb_dev_ops switchtec_ntb_ops = { + .mw_count = switchtec_ntb_mw_count, + .mw_get_align = switchtec_ntb_mw_get_align, + .mw_set_trans = switchtec_ntb_mw_set_trans, + .peer_mw_count = switchtec_ntb_peer_mw_count, + .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr, + 
.link_is_up = switchtec_ntb_link_is_up, + .link_enable = switchtec_ntb_link_enable, + .link_disable = switchtec_ntb_link_disable, + .db_valid_mask = switchtec_ntb_db_valid_mask, + .db_vector_count = switchtec_ntb_db_vector_count, + .db_vector_mask = switchtec_ntb_db_vector_mask, + .db_read = switchtec_ntb_db_read, + .db_clear = switchtec_ntb_db_clear, + .db_set_mask = switchtec_ntb_db_set_mask, + .db_clear_mask = switchtec_ntb_db_clear_mask, + .db_read_mask = switchtec_ntb_db_read_mask, + .peer_db_addr = switchtec_ntb_peer_db_addr, + .peer_db_set = switchtec_ntb_peer_db_set, + .spad_count = switchtec_ntb_spad_count, + .spad_read = switchtec_ntb_spad_read, + .spad_write = switchtec_ntb_spad_write, + .peer_spad_read = switchtec_ntb_peer_spad_read, + .peer_spad_write = switchtec_ntb_peer_spad_write, + .peer_spad_addr = switchtec_ntb_peer_spad_addr, +}; + +static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) +{ + u64 tpart_vec; + int self; + u64 part_map; + + sndev->ntb.pdev = sndev->stdev->pdev; + sndev->ntb.topo = NTB_TOPO_SWITCH; + sndev->ntb.ops = &switchtec_ntb_ops; + + INIT_WORK(&sndev->check_link_status_work, check_link_status_work); + sndev->link_force_down = false; + + sndev->self_partition = sndev->stdev->partition; + + sndev->mmio_ntb = sndev->stdev->mmio_ntb; + + self = sndev->self_partition; + tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high); + tpart_vec <<= 32; + tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low); + + part_map = ioread64(&sndev->mmio_ntb->ep_map); + tpart_vec &= part_map; + part_map &= ~(1ULL << sndev->self_partition); + + if (!tpart_vec) { + if (sndev->stdev->partition_count != 2) { + dev_err(&sndev->stdev->dev, + "ntb target partition not defined\n"); + return -ENODEV; + } + + if (!part_map) { + dev_err(&sndev->stdev->dev, + "peer partition is not NT partition\n"); + return -ENODEV; + } + + sndev->peer_partition = __ffs64(part_map); + } else { + if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) { + dev_err(&sndev->stdev->dev, + "ntb driver only supports 1 pair of 1-1 ntb mapping\n"); + return -ENODEV; + } + + sndev->peer_partition = __ffs64(tpart_vec); + if (!(part_map & (1ULL << sndev->peer_partition))) { + dev_err(&sndev->stdev->dev, + "ntb target partition is not NT partition\n"); + return -ENODEV; + } + } + + dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n", + sndev->self_partition, sndev->stdev->partition_count); + + sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb + + SWITCHTEC_NTB_REG_CTRL_OFFSET; + sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb + + SWITCHTEC_NTB_REG_DBMSG_OFFSET; + + sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition]; + sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition]; + sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition]; + sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg; + + return 0; +} + +static int config_rsvd_lut_win(struct switchtec_ntb *sndev, + struct ntb_ctrl_regs __iomem *ctl, + int lut_idx, int partition, u64 addr) +{ + int peer_bar = sndev->peer_direct_mw_to_bar[0]; + u32 ctl_val; + int rc; + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + return rc; + + ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl); + ctl_val &= 0xFF; + ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN; + ctl_val |= ilog2(LUT_SIZE) << 8; + ctl_val |= (sndev->nr_lut_mw - 1) << 14; + iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl); + + iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr), + 
&ctl->lut_entry[lut_idx]); + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + if (rc) { + u32 bar_error, lut_error; + + bar_error = ioread32(&ctl->bar_error); + lut_error = ioread32(&ctl->lut_error); + dev_err(&sndev->stdev->dev, + "Error setting up reserved lut window: %08x / %08x\n", + bar_error, lut_error); + return rc; + } + + return 0; +} + +static int config_req_id_table(struct switchtec_ntb *sndev, + struct ntb_ctrl_regs __iomem *mmio_ctrl, + int *req_ids, int count) +{ + int i, rc = 0; + u32 error; + u32 proxy_id; + + if (ioread16(&mmio_ctrl->req_id_table_size) < count) { + dev_err(&sndev->stdev->dev, + "Not enough requester IDs available.\n"); + return -EFAULT; + } + + rc = switchtec_ntb_part_op(sndev, mmio_ctrl, + NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + return rc; + + for (i = 0; i < count; i++) { + iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN, + &mmio_ctrl->req_id_table[i]); + + proxy_id = ioread32(&mmio_ctrl->req_id_table[i]); + dev_dbg(&sndev->stdev->dev, + "Requester ID %02X:%02X.%X -> BB:%02X.%X\n", + req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F, + req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F, + (proxy_id >> 1) & 0x7); + } + + rc = switchtec_ntb_part_op(sndev, mmio_ctrl, + NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + + if (rc == -EIO) { + error = ioread32(&mmio_ctrl->req_id_error); + dev_err(&sndev->stdev->dev, + "Error setting up the requester ID table: %08x\n", + error); + } + + return 0; +} + +static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx, + u64 *mw_addrs, int mw_count) +{ + int rc, i; + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl; + u64 addr; + size_t size, offset; + int bar; + int xlate_pos; + u32 ctl_val; + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + return rc; + + for (i = 0; i < sndev->nr_lut_mw; i++) { + if (i == ntb_lut_idx) + continue; + + addr = mw_addrs[0] + LUT_SIZE * i; + + iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) | + addr), + &ctl->lut_entry[i]); + } + + sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count); + + for (i = 0; i < sndev->nr_direct_mw; i++) { + bar = sndev->direct_mw_to_bar[i]; + offset = (i == 0) ? 
LUT_SIZE * sndev->nr_lut_mw : 0; + addr = mw_addrs[i] + offset; + size = pci_resource_len(sndev->ntb.pdev, bar) - offset; + xlate_pos = ilog2(size); + + if (offset && size > offset) + size = offset; + + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); + ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; + + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); + iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000), + &ctl->bar_entry[bar].win_size); + iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size); + iowrite64(sndev->peer_partition | addr, + &ctl->bar_entry[bar].xlate_addr); + } + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + if (rc) { + u32 bar_error, lut_error; + + bar_error = ioread32(&ctl->bar_error); + lut_error = ioread32(&ctl->lut_error); + dev_err(&sndev->stdev->dev, + "Error setting up cross link windows: %08x / %08x\n", + bar_error, lut_error); + return rc; + } + + return 0; +} + +static int crosslink_setup_req_ids(struct switchtec_ntb *sndev, + struct ntb_ctrl_regs __iomem *mmio_ctrl) +{ + int req_ids[16]; + int i; + u32 proxy_id; + + for (i = 0; i < ARRAY_SIZE(req_ids); i++) { + proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]); + + if (!(proxy_id & NTB_CTRL_REQ_ID_EN)) + break; + + req_ids[i] = ((proxy_id >> 1) & 0xFF); + } + + return config_req_id_table(sndev, mmio_ctrl, req_ids, i); +} + +/* + * In crosslink configuration there is a virtual partition in the + * middle of the two switches. The BARs in this partition have to be + * enumerated and assigned addresses. + */ +static int crosslink_enum_partition(struct switchtec_ntb *sndev, + u64 *bar_addrs) +{ + struct part_cfg_regs __iomem *part_cfg = + &sndev->stdev->mmio_part_cfg_all[sndev->peer_partition]; + u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF; + struct pff_csr_regs __iomem *mmio_pff = + &sndev->stdev->mmio_pff_csr[pff]; + const u64 bar_space = 0x1000000000LL; + u64 bar_addr; + int bar_cnt = 0; + int i; + + iowrite16(0x6, &mmio_pff->pcicmd); + + for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) { + iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]); + bar_addr = ioread64(&mmio_pff->pci_bar64[i]); + bar_addr &= ~0xf; + + dev_dbg(&sndev->stdev->dev, + "Crosslink BAR%d addr: %llx\n", + i*2, bar_addr); + + if (bar_addr != bar_space * i) + continue; + + bar_addrs[bar_cnt++] = bar_addr; + } + + return bar_cnt; +} + +static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev) +{ + int rc; + int bar = sndev->direct_mw_to_bar[0]; + const int ntb_lut_idx = 1; + u64 bar_addrs[6]; + u64 addr; + int offset; + int bar_cnt; + + if (!crosslink_is_enabled(sndev)) + return 0; + + dev_info(&sndev->stdev->dev, "Using crosslink configuration\n"); + sndev->ntb.topo = NTB_TOPO_CROSSLINK; + + bar_cnt = crosslink_enum_partition(sndev, bar_addrs); + if (bar_cnt < sndev->nr_direct_mw + 1) { + dev_err(&sndev->stdev->dev, + "Error enumerating crosslink partition\n"); + return -EINVAL; + } + + addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET + + SWITCHTEC_NTB_REG_DBMSG_OFFSET + + sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition); + + offset = addr & (LUT_SIZE - 1); + addr -= offset; + + rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx, + sndev->peer_partition, addr); + if (rc) + return rc; + + rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1], + bar_cnt - 1); + if (rc) + return rc; + + rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl); + if (rc) + return rc; + + sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar, + 
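+					/*
+					 * Map only LUT entry 1 (one LUT_SIZE
+					 * window at offset LUT_SIZE into the
+					 * BAR), which config_rsvd_lut_win()
+					 * above pointed at the peer's dbmsg
+					 * registers.
+					 */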
						LUT_SIZE, LUT_SIZE);
+	if (!sndev->mmio_xlink_win) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
+	sndev->nr_rsvd_luts++;
+
+	crosslink_init_dbmsgs(sndev);
+
+	return 0;
+}
+
+static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
+{
+	if (sndev->mmio_xlink_win)
+		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
+}
+
+static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
+{
+	int i;
+	int cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
+		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
+
+		if (r & NTB_CTRL_BAR_VALID)
+			map[cnt++] = i;
+	}
+
+	return cnt;
+}
+
+static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
+{
+	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
+				       sndev->mmio_self_ctrl);
+
+	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
+	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
+
+	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
+		sndev->nr_direct_mw, sndev->nr_lut_mw);
+
+	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
+					    sndev->mmio_peer_ctrl);
+
+	sndev->peer_nr_lut_mw =
+		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
+	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
+
+	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
+		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
+}
+
+/*
+ * There are 64 doorbells in the switch hardware, but they are
+ * shared among all partitions. So we must split them in half
+ * (32 for each partition). However, the message interrupts are
+ * also shared with the top 4 doorbells, so we just limit this to
+ * 28 doorbells per partition.
+ *
+ * In crosslink mode, each side has its own dbmsg register, so
+ * they can each use all 60 of the available doorbells.
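+ *
+ * Concretely (a reading of the constants chosen below, not a statement
+ * from the hardware documentation): db_mask keeps the low 60 bits
+ * (0x0FFFFFFFFFFFFFFF). Without crosslink, the partition with the
+ * lower ID uses db_shift = 0 and rings its peer at db_peer_shift = 32,
+ * while the higher-ID partition uses the mirrored shifts; in both
+ * cases only db_valid_mask = 0x0FFFFFFF (28 bits) is usable.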
+ */ +static void switchtec_ntb_init_db(struct switchtec_ntb *sndev) +{ + sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL; + + if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) { + sndev->db_shift = 0; + sndev->db_peer_shift = 0; + sndev->db_valid_mask = sndev->db_mask; + } else if (sndev->self_partition < sndev->peer_partition) { + sndev->db_shift = 0; + sndev->db_peer_shift = 32; + sndev->db_valid_mask = 0x0FFFFFFF; + } else { + sndev->db_shift = 32; + sndev->db_peer_shift = 0; + sndev->db_valid_mask = 0x0FFFFFFF; + } + + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); + iowrite64(sndev->db_valid_mask << sndev->db_peer_shift, + &sndev->mmio_peer_dbmsg->odb_mask); + + dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n", + sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask); +} + +static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev) +{ + int i; + u32 msg_map = 0; + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { + int m = i | sndev->peer_partition << 2; + + msg_map |= m << i * 8; + } + + iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map); + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) + iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK, + &sndev->mmio_self_dbmsg->imsg[i]); +} + +static int +switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev) +{ + int req_ids[2]; + + /* + * Root Complex Requester ID (which is 0:00.0) + */ + req_ids[0] = 0; + + /* + * Host Bridge Requester ID (as read from the mmap address) + */ + req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id); + + return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids, + ARRAY_SIZE(req_ids)); +} + +static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev) +{ + int i; + + memset(sndev->self_shared, 0, LUT_SIZE); + sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC; + sndev->self_shared->partition_id = sndev->stdev->partition; + + for (i = 0; i < sndev->nr_direct_mw; i++) { + int bar = sndev->direct_mw_to_bar[i]; + resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar); + + if (i == 0) + sz = min_t(resource_size_t, sz, + LUT_SIZE * sndev->nr_lut_mw); + + sndev->self_shared->mw_sizes[i] = sz; + } + + for (i = 0; i < sndev->nr_lut_mw; i++) { + int idx = sndev->nr_direct_mw + i; + + sndev->self_shared->mw_sizes[idx] = LUT_SIZE; + } +} + +static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev) +{ + int self_bar = sndev->direct_mw_to_bar[0]; + int rc; + + sndev->nr_rsvd_luts++; + sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev, + LUT_SIZE, + &sndev->self_shared_dma, + GFP_KERNEL); + if (!sndev->self_shared) { + dev_err(&sndev->stdev->dev, + "unable to allocate memory for shared mw\n"); + return -ENOMEM; + } + + switchtec_ntb_init_shared(sndev); + + rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0, + sndev->self_partition, + sndev->self_shared_dma); + if (rc) + goto unalloc_and_exit; + + sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE); + if (!sndev->peer_shared) { + rc = -ENOMEM; + goto unalloc_and_exit; + } + + dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n"); + return 0; + +unalloc_and_exit: + dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, + sndev->self_shared, sndev->self_shared_dma); + + return rc; +} + +static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev) +{ + if (sndev->peer_shared) + pci_iounmap(sndev->stdev->pdev, sndev->peer_shared); + + if (sndev->self_shared) + dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, + 
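+				  /*
+				   * Release the shared-window page that
+				   * switchtec_ntb_init_shared_mw() allocated
+				   * with dma_alloc_coherent():
+				   */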
sndev->self_shared, + sndev->self_shared_dma); + sndev->nr_rsvd_luts--; +} + +static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev) +{ + struct switchtec_ntb *sndev = dev; + + dev_dbg(&sndev->stdev->dev, "doorbell\n"); + + ntb_db_event(&sndev->ntb, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev) +{ + int i; + struct switchtec_ntb *sndev = dev; + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { + u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]); + + if (msg & NTB_DBMSG_IMSG_STATUS) { + dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", + i, (u32)msg); + iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status); + + if (i == LINK_MESSAGE) + switchtec_ntb_check_link(sndev, msg); + } + } + + return IRQ_HANDLED; +} + +static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev) +{ + int i; + int rc; + int doorbell_irq = 0; + int message_irq = 0; + int event_irq; + int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map); + + event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number); + + while (doorbell_irq == event_irq) + doorbell_irq++; + while (message_irq == doorbell_irq || + message_irq == event_irq) + message_irq++; + + dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n", + event_irq, doorbell_irq, message_irq); + + for (i = 0; i < idb_vecs - 4; i++) + iowrite8(doorbell_irq, + &sndev->mmio_self_dbmsg->idb_vec_map[i]); + + for (; i < idb_vecs; i++) + iowrite8(message_irq, + &sndev->mmio_self_dbmsg->idb_vec_map[i]); + + sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq); + sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq); + + rc = request_irq(sndev->doorbell_irq, + switchtec_ntb_doorbell_isr, 0, + "switchtec_ntb_doorbell", sndev); + if (rc) + return rc; + + rc = request_irq(sndev->message_irq, + switchtec_ntb_message_isr, 0, + "switchtec_ntb_message", sndev); + if (rc) { + free_irq(sndev->doorbell_irq, sndev); + return rc; + } + + return 0; +} + +static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev) +{ + free_irq(sndev->doorbell_irq, sndev); + free_irq(sndev->message_irq, sndev); +} + +static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev) +{ + int rc; + + if (crosslink_is_enabled(sndev)) + return 0; + + dev_info(&sndev->stdev->dev, "reinitialize shared memory window\n"); + rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0, + sndev->self_partition, + sndev->self_shared_dma); + return rc; +} + +static int switchtec_ntb_add(struct device *dev) +{ + struct switchtec_dev *stdev = to_stdev(dev); + struct switchtec_ntb *sndev; + int rc; + + stdev->sndev = NULL; + + if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8)) + return -ENODEV; + + sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev)); + if (!sndev) + return -ENOMEM; + + sndev->stdev = stdev; + rc = switchtec_ntb_init_sndev(sndev); + if (rc) + goto free_and_exit; + + switchtec_ntb_init_mw(sndev); + + rc = switchtec_ntb_init_req_id_table(sndev); + if (rc) + goto free_and_exit; + + rc = switchtec_ntb_init_crosslink(sndev); + if (rc) + goto free_and_exit; + + switchtec_ntb_init_db(sndev); + switchtec_ntb_init_msgs(sndev); + + rc = switchtec_ntb_init_shared_mw(sndev); + if (rc) + goto deinit_crosslink; + + rc = switchtec_ntb_init_db_msg_irq(sndev); + if (rc) + goto deinit_shared_and_exit; + + /* + * If this host crashed, the other host may think the link is + * still up. 
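+	 * Any link state it cached from before the crash is stale.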
+	 * Tell them to force it down (it will go back up
+	 * once we register the ntb device).
+	 */
+	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);
+
+	rc = ntb_register_device(&sndev->ntb);
+	if (rc)
+		goto deinit_and_exit;
+
+	stdev->sndev = sndev;
+	stdev->link_notifier = switchtec_ntb_link_notification;
+	dev_info(dev, "NTB device registered\n");
+
+	return 0;
+
+deinit_and_exit:
+	switchtec_ntb_deinit_db_msg_irq(sndev);
+deinit_shared_and_exit:
+	switchtec_ntb_deinit_shared_mw(sndev);
+deinit_crosslink:
+	switchtec_ntb_deinit_crosslink(sndev);
+free_and_exit:
+	kfree(sndev);
+	dev_err(dev, "failed to register ntb device: %d\n", rc);
+	return rc;
+}
+
+static void switchtec_ntb_remove(struct device *dev)
+{
+	struct switchtec_dev *stdev = to_stdev(dev);
+	struct switchtec_ntb *sndev = stdev->sndev;
+
+	if (!sndev)
+		return;
+
+	stdev->link_notifier = NULL;
+	stdev->sndev = NULL;
+	ntb_unregister_device(&sndev->ntb);
+	switchtec_ntb_deinit_db_msg_irq(sndev);
+	switchtec_ntb_deinit_shared_mw(sndev);
+	switchtec_ntb_deinit_crosslink(sndev);
+	kfree(sndev);
+	dev_info(dev, "ntb device unregistered\n");
+}
+
+static struct class_interface switchtec_interface = {
+	.add_dev = switchtec_ntb_add,
+	.remove_dev = switchtec_ntb_remove,
+};
+
+static int __init switchtec_ntb_init(void)
+{
+	switchtec_interface.class = switchtec_class;
+	return class_interface_register(&switchtec_interface);
+}
+module_init(switchtec_ntb_init);
+
+static void __exit switchtec_ntb_exit(void)
+{
+	class_interface_unregister(&switchtec_interface);
+}
+module_exit(switchtec_ntb_exit);